| hexsha (stringlengths 40-40) | size (int64 1-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-239) | max_stars_repo_name (stringlengths 5-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-239) | max_issues_repo_name (stringlengths 5-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-239) | max_forks_repo_name (stringlengths 5-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 1-1.03M) | avg_line_length (float64 1-958k) | max_line_length (int64 1-1.03M) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 4a190fd12c879dcd2af13b17cd522eb61ccbb41a | 3,655 | py | Python | sent_levl/pre_process.py | tejasvaidhyadev/extract_covid_entity | 6dcd1711d889d9e43af70955c98d3f7d2ce2a3f9 | ["MIT"] | 6 | 2020-09-24T04:31:08.000Z | 2021-12-28T12:49:21.000Z | sent_levl/pre_process.py | tejasvaidhyadev/extract_covid_entity | 6dcd1711d889d9e43af70955c98d3f7d2ce2a3f9 | ["MIT"] | null | null | null | sent_levl/pre_process.py | tejasvaidhyadev/extract_covid_entity | 6dcd1711d889d9e43af70955c98d3f7d2ce2a3f9 | ["MIT"] | 3 | 2020-12-22T04:49:06.000Z | 2021-04-22T07:18:44.000Z |
# Input: data/wtwt_ids.json and data/scrapped_full/*
# Output: data/pre_processed.json
# Read all existing scrapped files and preprocess, tokenize the full_text part
# Read wtwt_ids.json and for each tweet_id, append toked_text. Save into pre_processed.json
import json
import os
import sys
import glob
import re
from ekphrasis.classes.preprocessor import TextPreProcessor
from ekphrasis.classes.tokenizer import SocialTokenizer
from ekphrasis.dicts.emoticons import emoticons
FILES = ['can_not_test-add_text.jsonl', 'cure_and_prevention-add_text.jsonl',
'death-add_text.jsonl', 'positive-add_text.jsonl', 'negative-add_text.jsonl']
FILES = ['../data/' + FILE for FILE in FILES]
EMOJI_PATTERN = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
"]+", flags=re.UNICODE)
text_processor = TextPreProcessor(
normalize=['url', 'email', 'percent', 'money', 'phone', 'user',
'time', 'url', 'date', 'number'],
annotate={"hashtag", "allcaps", "elongated", "repeated",
'emphasis', 'censored'},
fix_html=True,
segmenter="twitter",
corrector="twitter",
unpack_hashtags=True,
unpack_contractions=True,
spell_correct_elong=True,
tokenizer=SocialTokenizer(lowercase=True).tokenize,
dicts=[emoticons]
)
REMOVE_TAGS = [
"<emphasis>", "<kiss>", "<repeated>", "<laugh>", "<allcaps>",
"</allcaps>", "<angel>", "<elongated>", "<tong>", "<annoyed>",
"<censored>", "<happy>", "<percent>", "<wink>",
"<headdesk>", "<surprise>", "<date>", "<time>", "<url>",
"<sad>", "<email>", "<phone>", "<hashtag>", "</hashtag>",
"<number>", "<money>"
]
ADD_TO_GLOVE = ["<user>"]
PUNCTS = '''()-[]{;}\,<>/@#'%"^*_~.?!| +:=`'''
def decontracted(phrase):
phrase = re.sub(r"won\'t", "will not", phrase)
phrase = re.sub(r"can\'t", "can not", phrase)
phrase = re.sub(r"n\'t", " not", phrase)
phrase = re.sub(r"\'re", " are", phrase)
phrase = re.sub(r"\'s", " is", phrase)
phrase = re.sub(r"\'d", " would", phrase)
phrase = re.sub(r"\'ll", " will", phrase)
phrase = re.sub(r"\'t", " not", phrase)
phrase = re.sub(r"\'ve", " have", phrase)
phrase = re.sub(r"\'m", " am", phrase)
phrase = re.sub(r"1st", " first ", phrase)
phrase = re.sub(r"2nd", " second ", phrase)
phrase = re.sub(r"3rd", " third ", phrase)
phrase = re.sub(r"—", " ", phrase)
phrase = re.sub(r"-", " ", phrase)
return phrase
def pre_process_single(tweet):
tweet_toked_text = []
de_emojified_text = tweet.encode('ascii', 'ignore').decode('ascii')
de_emojified_text = EMOJI_PATTERN.sub(r' ', de_emojified_text)
de_emojified_text = decontracted(de_emojified_text)
company_normalize_text = de_emojified_text
tokens = text_processor.pre_process_doc(company_normalize_text)
for token in tokens:
if token in REMOVE_TAGS:
pass
else:
tweet_toked_text.append(token)
if len(tweet_toked_text) < 1:
        pass  # print(tweet, tokens, t_id)
return tweet_toked_text
for file in FILES:
savepath = "preproced_" + file[:-1]
fin = open(file, 'r')
lines = fin.read().split("\n")
event_data = []
for line in lines:
if line.strip() == "":
continue
j = json.loads(line)
j['prepro'] = " ".join(pre_process_single(j['text']))
event_data.append(j.copy())
fin.close()
fout = open(savepath, "w+")
json.dump(event_data, fout)
fout.close()
| 31.508621 | 93 | 0.633105 |
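As a quick illustration of what the preprocessing pipeline in the file above does to one tweet, here is a hedged sketch; it assumes it is pasted at the bottom of that same file (so decontracted, pre_process_single, and the ekphrasis objects are in scope) and that ekphrasis is installed. The sample text and the expected tokens are illustrative, not taken from the dataset.

# Illustrative check of the per-tweet pipeline defined above; run it in the same
# file so the helpers and the ekphrasis text_processor are already built.
sample = "I can't believe it!!! Tested positive on 1st March https://t.co/xyz"

print(decontracted(sample))
# -> "I can not believe it!!! Tested positive on  first  March https://t.co/xyz"

print(pre_process_single(sample))
# -> roughly ['i', 'can', 'not', 'believe', 'it', '!', '!', '!', 'tested',
#             'positive', 'on', 'first', 'march']
# URLs are normalized to <url> and then dropped along with the other
# annotation tags listed in REMOVE_TAGS.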
| 4a191076d05e6427e03cf4357f3fedee5e0a4630 | 986 | py | Python | python/modules/softmax.py | RachelBlin/keras-retinanet | 248b5463ce663a35350b222fd469b506b37ce118 | ["Apache-2.0"] | 2 | 2020-11-12T11:58:53.000Z | 2021-04-14T12:24:42.000Z | python/modules/softmax.py | RachelBlin/keras-retinanet | 248b5463ce663a35350b222fd469b506b37ce118 | ["Apache-2.0"] | null | null | null | python/modules/softmax.py | RachelBlin/keras-retinanet | 248b5463ce663a35350b222fd469b506b37ce118 | ["Apache-2.0"] | 3 | 2019-11-14T14:01:58.000Z | 2021-04-14T12:24:50.000Z |
'''
@author: Sebastian Lapuschkin
@author: Gregoire Montavon
@maintainer: Sebastian Lapuschkin
@contact: sebastian.lapuschkin@hhi.fraunhofer.de, wojciech.samek@hhi.fraunhofer.de
@date: 14.08.2015
@version: 1.2+
@copyright: Copyright (c) 2015-2017, Sebastian Lapuschkin, Alexander Binder, Gregoire Montavon, Klaus-Robert Mueller, Wojciech Samek
@license : BSD-2-Clause
'''
import numpy as np
from .module import Module
# -------------------------------
# Softmax layer
# -------------------------------
class SoftMax(Module):
'''
Softmax Layer
'''
def __init__(self):
Module.__init__(self)
def forward(self,X):
self.X = X
self.Y = np.exp(X) / np.exp(X).sum(axis=1,keepdims=True)
return self.Y
def lrp(self,R,*args,**kwargs):
# just propagate R further down.
# makes sure subroutines never get called.
#return R*self.X
return R
def clean(self):
self.X = None
self.Y = None
| 24.65 | 133 | 0.609533 |
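For reference, a small self-contained numeric sketch of the forward computation above, written with numpy only because the class depends on a package-relative Module base class; the input values are illustrative, and the stabilization remark is an editor's aside rather than part of the original module.

import numpy as np

# Same formula as SoftMax.forward above: row-wise exponentiation and normalization.
X = np.array([[1.0, 2.0, 3.0],
              [0.5, 0.5, 0.5]])
Y = np.exp(X) / np.exp(X).sum(axis=1, keepdims=True)
print(Y)               # softmax probabilities per row
print(Y.sum(axis=1))   # -> [1. 1.], each row is a proper distribution

# lrp() above returns R unchanged, so relevance simply passes through this layer.
# Note: np.exp(X) can overflow for large logits; subtracting the row-wise max
# first (np.exp(X - X.max(axis=1, keepdims=True))) is the usual stabilization.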
| 4a1910cc531c431313cf64a1dc301c2751979929 | 1,101 | py | Python | app/ch13-validation/starter/pypi_org/data/db_session.py | callumrollo/data-driven-web-apps-with-flask | 550096856145e7cc6bba1d3df7f28d923e417531 | ["MIT"] | null | null | null | app/ch13-validation/starter/pypi_org/data/db_session.py | callumrollo/data-driven-web-apps-with-flask | 550096856145e7cc6bba1d3df7f28d923e417531 | ["MIT"] | null | null | null | app/ch13-validation/starter/pypi_org/data/db_session.py | callumrollo/data-driven-web-apps-with-flask | 550096856145e7cc6bba1d3df7f28d923e417531 | ["MIT"] | null | null | null |
import sqlalchemy as sa
import sqlalchemy.orm as orm
from sqlalchemy.orm import Session
from pypi_org.data.modelbase import SqlAlchemyBase
__factory = None
def global_init(db_file: str):
global __factory
if __factory:
return
if not db_file or not db_file.strip():
raise Exception("You must specify a db file.")
conn_str = 'sqlite:///' + db_file.strip()
print("Connecting to DB with {}".format(conn_str))
    # Adding check_same_thread=False after the recording. This can be an issue with the
    # creating/owner thread when cleaning up sessions, etc. It is a SQLite restriction
    # that we probably don't care about in this example.
engine = sa.create_engine(conn_str, echo=False, connect_args={"check_same_thread": False})
__factory = orm.sessionmaker(bind=engine)
# noinspection PyUnresolvedReferences
import pypi_org.data.__all_models
SqlAlchemyBase.metadata.create_all(engine)
def create_session() -> Session:
global __factory
session: Session = __factory()
session.expire_on_commit = False
return session
| 27.525 | 94 | 0.726612 |
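A minimal usage sketch for the session helpers above; it assumes the pypi_org package is importable, and "example.sqlite" is a placeholder database file rather than anything from the original project.

# Hypothetical driver for the helpers above.
import pypi_org.data.db_session as db_session

db_session.global_init("example.sqlite")   # builds the engine and session factory once

session = db_session.create_session()
try:
    # ... query or add SQLAlchemy model instances here ...
    session.commit()
finally:
    session.close()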
| 4a1911302c9949eed5f66f4610f16c0c65cddcc2 | 425 | py | Python | part-requests/test-requests.py | wuljchange/interesting_python | 3fdf9f7f17f7b361be030bb4eadf7aab889b15fe | ["MIT"] | 1 | 2019-03-29T14:09:43.000Z | 2019-03-29T14:09:43.000Z | part-requests/test-requests.py | wuljchange/interesting_python | 3fdf9f7f17f7b361be030bb4eadf7aab889b15fe | ["MIT"] | null | null | null | part-requests/test-requests.py | wuljchange/interesting_python | 3fdf9f7f17f7b361be030bb4eadf7aab889b15fe | ["MIT"] | null | null | null |
# ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2019-11-08 11:42
# @Author : 吴林江
# @Email : wulinjiang1@kingsoft.com
# @File : test-requests.py
# ----------------------------------------------
import requests
if __name__ == "__main__":
url = "https://cn.bing.com/"
resp = requests.get("https://"+"cn.bing.com", verify=True)
print(resp.status_code)
print(resp.url)
| 28.333333 | 62 | 0.470588 |
| 4a1911da145e4fe69d1a35e2c1e07cce11c2c49d | 3,164 | py | Python | api/tests/opentrons/config/test_reset.py | knownmed/opentrons | d02eb3c6cbf9f1c8c05c5e9e1dac30a92a8c5e6c | ["Apache-2.0"] | 235 | 2017-10-27T20:37:27.000Z | 2022-03-30T14:09:49.000Z | api/tests/opentrons/config/test_reset.py | knownmed/opentrons | d02eb3c6cbf9f1c8c05c5e9e1dac30a92a8c5e6c | ["Apache-2.0"] | 8,425 | 2017-10-26T15:25:43.000Z | 2022-03-31T23:54:26.000Z | api/tests/opentrons/config/test_reset.py | knownmed/opentrons | d02eb3c6cbf9f1c8c05c5e9e1dac30a92a8c5e6c | ["Apache-2.0"] | 130 | 2017-11-09T21:02:37.000Z | 2022-03-15T18:01:24.000Z |
from unittest.mock import patch
import pytest
from opentrons.config import reset
@pytest.fixture
def mock_reset_boot_scripts():
with patch("opentrons.config.reset.reset_boot_scripts") as m:
yield m
@pytest.fixture
def mock_reset_labware_calibration():
with patch("opentrons.config.reset.reset_labware_calibration") as m:
yield m
@pytest.fixture
def mock_labware():
with patch("opentrons.config.reset.delete") as m:
yield m
@pytest.fixture
def mock_reset_pipette_offset():
with patch("opentrons.config.reset.reset_pipette_offset") as m:
yield m
@pytest.fixture
def mock_reset_deck_calibration():
with patch("opentrons.config.reset.reset_deck_calibration") as m:
yield m
@pytest.fixture
def mock_reset_tip_length_calibrations():
with patch("opentrons.config.reset.reset_tip_length_calibrations") as m:
yield m
@pytest.fixture
def mock_cal_storage_delete():
with patch("opentrons.config.reset.delete", autospec=True) as m:
yield m
def test_reset_empty_set(
mock_reset_boot_scripts,
mock_reset_labware_calibration,
mock_reset_pipette_offset,
mock_reset_deck_calibration,
mock_reset_tip_length_calibrations,
):
reset.reset(set())
mock_reset_labware_calibration.assert_not_called()
mock_reset_boot_scripts.assert_not_called()
mock_reset_pipette_offset.assert_not_called()
mock_reset_deck_calibration.assert_not_called()
mock_reset_tip_length_calibrations.assert_not_called()
def test_reset_all_set(
mock_reset_boot_scripts,
mock_reset_labware_calibration,
mock_reset_pipette_offset,
mock_reset_deck_calibration,
mock_reset_tip_length_calibrations,
):
reset.reset(
{
reset.ResetOptionId.boot_scripts,
reset.ResetOptionId.labware_calibration,
reset.ResetOptionId.deck_calibration,
reset.ResetOptionId.pipette_offset,
reset.ResetOptionId.tip_length_calibrations,
}
)
mock_reset_labware_calibration.assert_called_once()
mock_reset_boot_scripts.assert_called_once()
mock_reset_pipette_offset.assert_called_once()
mock_reset_deck_calibration.assert_called_once()
mock_reset_tip_length_calibrations.assert_called_once()
def test_labware_calibration_reset(mock_labware):
reset.reset_labware_calibration()
# Check side effecting function calls
mock_labware.clear_calibrations.assert_called_once()
def test_deck_calibration_reset(mock_cal_storage_delete):
reset.reset_deck_calibration()
mock_cal_storage_delete.delete_robot_deck_attitude.assert_called_once()
mock_cal_storage_delete.clear_pipette_offset_calibrations.assert_called_once()
def test_tip_length_calibrations_reset(mock_cal_storage_delete):
reset.reset_tip_length_calibrations()
mock_cal_storage_delete.clear_tip_length_calibration.assert_called_once()
mock_cal_storage_delete.clear_pipette_offset_calibrations.assert_called_once()
def test_pipette_offset_reset(mock_cal_storage_delete):
reset.reset_pipette_offset()
mock_cal_storage_delete.clear_pipette_offset_calibrations.assert_called_once()
| 29.570093 | 82 | 0.787927 |
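The fixtures in the test module above all follow the same patch-as-fixture pattern: unittest.mock.patch replaces the named attribute (for example opentrons.config.reset.reset_boot_scripts) with a MagicMock for the duration of the with block, and yielding the mock hands it to the test. A self-contained illustration against a stdlib function, not tied to the opentrons package:

from unittest.mock import patch
import json

def uses_json():
    return json.dumps({"ok": True})

# Inside the block, json.dumps is a MagicMock, just as reset.reset_boot_scripts
# is inside the mock_reset_boot_scripts fixture above.
with patch("json.dumps") as mock_dumps:
    mock_dumps.return_value = "stubbed"
    assert uses_json() == "stubbed"
    mock_dumps.assert_called_once()

# Once the block exits, the real function is restored.
assert uses_json() == '{"ok": true}'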
| 4a1912e15a013de4a71dca73cef8096fa8ad444d | 37,168 | py | Python | python/oneflow/nn/modules/loss.py | Oneflow-Inc/oneflow | b105cacd1e3b0b21bdec1a824a2c125390a2a665 | ["Apache-2.0"] | 3,285 | 2020-07-31T05:51:22.000Z | 2022-03-31T15:20:16.000Z | python/oneflow/nn/modules/loss.py | Oneflow-Inc/oneflow | b105cacd1e3b0b21bdec1a824a2c125390a2a665 | ["Apache-2.0"] | 2,417 | 2020-07-31T06:28:58.000Z | 2022-03-31T23:04:14.000Z | python/oneflow/nn/modules/loss.py | Oneflow-Inc/oneflow | b105cacd1e3b0b21bdec1a824a2c125390a2a665 | ["Apache-2.0"] | 520 | 2020-07-31T05:52:42.000Z | 2022-03-29T02:38:11.000Z |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional
import oneflow as flow
from oneflow.framework.tensor import Tensor
from oneflow.nn.module import Module
from oneflow.nn.modules.constant import _ConstantBase
class _Loss(Module):
def __init__(self, reduction: str = "mean") -> None:
super(_Loss, self).__init__()
assert reduction in ["none", "mean", "sum"]
self.reduction = reduction
class _WeightedLoss(_Loss):
def __init__(
self, weight: Optional[Tensor] = None, reduction: str = "mean"
) -> None:
super(_WeightedLoss, self).__init__(reduction=reduction)
self.weight = weight
class L1Loss(_Loss):
"""This operator computes the L1 Loss between each element in `input` and `target`.
The equation is:
if reduction = "none":
.. math::
output = |Target - Input|
if reduction = "mean":
.. math::
output = \\frac{1}{n}\\sum_{i=1}^n|Target_i - Input_i|
if reduction = "sum":
.. math::
output = \\sum_{i=1}^n|Target_i - Input_i|
Args:
input (oneflow.Tensor): The input Tensor.
target (oneflow.Tensor): The target Tensor.
reduction (str): The reduce type, it can be one of "none", "mean", "sum". Defaults to "mean".
Returns:
oneflow.Tensor: The result Tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor([[1, 1, 1], [2, 2, 2], [7, 7, 7]], dtype = flow.float32)
>>> target = flow.tensor([[4, 4, 4], [4, 4, 4], [4, 4, 4]], dtype = flow.float32)
>>> m = flow.nn.L1Loss(reduction="none")
>>> out = m(input, target)
>>> out
tensor([[3., 3., 3.],
[2., 2., 2.],
[3., 3., 3.]], dtype=oneflow.float32)
>>> m_mean = flow.nn.L1Loss(reduction="mean")
>>> out = m_mean(input, target)
>>> out
tensor(2.6667, dtype=oneflow.float32)
>>> m_mean = flow.nn.L1Loss(reduction="sum")
>>> out = m_mean(input, target)
>>> out
tensor(24., dtype=oneflow.float32)
"""
def __init__(self, reduction: str = "mean") -> None:
super(L1Loss, self).__init__(reduction)
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return flow._C.l1_loss(input, target, self.reduction)
class CrossEntropyLoss(_WeightedLoss):
"""This criterion combines :class:`~flow.nn.LogSoftmax` and :class:`~flow.nn.NLLLoss` in one single class.
It is useful when training a classification problem with `C` classes.
The `input` is expected to contain raw, unnormalized scores for each class.
`input` has to be a Tensor of size either :math:`(minibatch, C)` or
:math:`(minibatch, C, d_1, d_2, ..., d_K)`
with :math:`K \\geq 1` for the `K`-dimensional case (described later).
This criterion expects a class index in the range :math:`[0, C-1]` as the
`target` for each value of a 1D tensor of size `minibatch`;
The loss can be described as:
.. math::
\\text{loss}(x, class) = -\\log\\left(\\frac{\\exp(x[class])}{\\sum_j \\exp(x[j])}\\right)
= -x[class] + \\log\\left(\\sum_j \\exp(x[j])\\right)
Can also be used for higher dimension inputs, such as 2D images, by providing
an input of size :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \\geq 1`,
where :math:`K` is the number of dimensions, and a target of appropriate shape
(see below).
Args:
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will
be applied, ``'mean'``: the weighted mean of the output is taken,
``'sum'``: the output will be summed. Default: ``'mean'``
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor(
... [[-0.1664078, -1.7256707, -0.14690138],
... [-0.21474946, 0.53737473, 0.99684894],
... [-1.135804, -0.50371903, 0.7645404]], dtype=flow.float32)
>>> target = flow.tensor(np.array([0, 1, 2]), dtype=flow.int32)
>>> out = flow.nn.CrossEntropyLoss(reduction="none")(input, target)
>>> out
tensor([0.8020, 1.1167, 0.3583], dtype=oneflow.float32)
>>> out_sum = flow.nn.CrossEntropyLoss(reduction="sum")(input, target)
>>> out_sum
tensor(2.2769, dtype=oneflow.float32)
>>> out_mean = flow.nn.CrossEntropyLoss(reduction="mean")(input, target)
>>> out_mean
tensor(0.7590, dtype=oneflow.float32)
"""
def __init__(
self,
weight: Optional[Tensor] = None,
ignore_index: int = -100,
reduction: str = "mean",
) -> None:
super(CrossEntropyLoss, self).__init__(weight, reduction)
self.ignore_index = ignore_index
def forward(self, input, target):
return flow._C.cross_entropy(
input, target, self.weight, self.ignore_index, self.reduction
)
class BCELoss(_WeightedLoss):
"""This operator computes the binary cross entropy loss.
The equation is:
if reduction = "none":
.. math::
out = -(Target_i*log(Input_i) + (1-Target_i)*log(1-Input_i))
if reduction = "mean":
.. math::
out = -\\frac{1}{n}\\sum_{i=1}^n(Target_i*log(Input_i) + (1-Target_i)*log(1-Input_i))
if reduction = "sum":
.. math::
out = -\\sum_{i=1}^n(Target_i*log(Input_i) + (1-Target_i)*log(1-Input_i))
Args:
weight (oneflow.Tensor, optional): The manual rescaling weight to the loss. Default to None, whose corresponding weight value is 1.
reduction (str, optional): The reduce type, it can be one of "none", "mean", "sum". Defaults to "mean".
Attention:
The input value must be in the range of (0, 1). Or the loss function may return `nan` value.
Returns:
oneflow.Tensor: The result Tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.Tensor(np.array([[1.2, 0.2, -0.3], [0.7, 0.6, -2]]).astype(np.float32))
>>> target = flow.Tensor(np.array([[0, 1, 0], [1, 0, 1]]).astype(np.float32))
>>> weight = flow.Tensor(np.array([[2, 2, 2], [2, 2, 2]]).astype(np.float32))
>>> activation = flow.nn.Sigmoid()
>>> sigmoid_input = activation(input)
>>> m = flow.nn.BCELoss(weight, reduction="none")
>>> out = m(sigmoid_input, target)
>>> out
tensor([[2.9266, 1.1963, 1.1087],
[0.8064, 2.0750, 4.2539]], dtype=oneflow.float32)
>>> m_sum = flow.nn.BCELoss(weight, reduction="sum")
>>> out = m_sum(sigmoid_input, target)
>>> out
tensor(12.3668, dtype=oneflow.float32)
>>> m_mean = flow.nn.BCELoss(weight, reduction="mean")
>>> out = m_mean(sigmoid_input, target)
>>> out
tensor(2.0611, dtype=oneflow.float32)
>>> m_none = flow.nn.BCELoss()
>>> out = m_none(sigmoid_input, target)
>>> out
tensor(1.0306, dtype=oneflow.float32)
"""
def __init__(
self, weight: Optional[Tensor] = None, reduction: str = "mean"
) -> None:
super(BCELoss, self).__init__(weight, reduction)
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return flow._C.binary_cross_entropy_loss(
input, target, self.weight, self.reduction
)
class NLLLoss(_WeightedLoss):
""" The negative log likelihood loss. It is useful to train a classification
problem with `C` classes.
The `input` given through a forward call is expected to contain
log-probabilities of each class. `input` has to be a Tensor of size either
:math:`(minibatch, C)` or :math:`(minibatch, C, d_1, d_2, ..., d_K)`
with :math:`K \\geq 1` for the `K`-dimensional case (described later).
Obtaining log-probabilities in a neural network is easily achieved by
adding a `LogSoftmax` layer in the last layer of your network.
You may use `CrossEntropyLoss` instead, if you prefer not to add an extra
layer.
The `target` that this loss expects should be a class index in the range :math:`[0, C-1]`
where `C = number of classes`;
The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
.. math::
\\ell(x, y) = L = \\{l_1,\\dots,l_N\\}^\\top, \\quad
l_n = - w_{y_n} x_{n,y_n}, \\quad
w_{c} = \\mathbb{1},
where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight, and
:math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
(default ``'mean'``), then
.. math::
\\ell(x, y) = \\begin{cases}
\\sum_{n=1}^N \\frac{1}{N} l_n, &
\\text{if reduction} = \\text{`mean';}\\\\
\\sum_{n=1}^N l_n, &
\\text{if reduction} = \\text{`sum'.}
\\end{cases}
Can also be used for higher dimension inputs, such as 2D images, by providing
an input of size :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \\geq 1`,
where :math:`K` is the number of dimensions, and a target of appropriate shape
(see below). In the case of images, it computes NLL loss per-pixel.
Args:
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will
be applied, ``'mean'``: the weighted mean of the output is taken,
``'sum'``: the output will be summed. Default: ``'mean'``
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor(
... [[-0.1664078, -1.7256707, -0.14690138],
... [-0.21474946, 0.53737473, 0.99684894],
... [-1.135804, -0.50371903, 0.7645404]], dtype=flow.float32)
>>> target = flow.tensor(np.array([0, 1, 2]), dtype=flow.int32)
>>> m = flow.nn.NLLLoss(reduction="none")
>>> out = m(input, target)
>>> out
tensor([ 0.1664, -0.5374, -0.7645], dtype=oneflow.float32)
>>> m = flow.nn.NLLLoss(reduction="sum")
>>> out = m(input, target)
>>> out
tensor(-1.1355, dtype=oneflow.float32)
>>> m = flow.nn.NLLLoss(reduction="mean")
>>> out = m(input, target)
>>> out
tensor(-0.3785, dtype=oneflow.float32)
"""
def __init__(
self,
weight: Optional[Tensor] = None,
ignore_index: int = -100,
reduction: str = "mean",
) -> None:
super(NLLLoss, self).__init__(weight, reduction)
self.ignore_index = ignore_index
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return flow._C.nll_loss(
input, target, self.weight, self.ignore_index, self.reduction
)
class KLDivLoss(_Loss):
"""The interface is consistent with PyTorch.
The documentation is referenced from:
https://pytorch.org/docs/stable/generated/torch.nn.KLDivLoss.html?highlight=kldivloss#torch.nn.KLDivLoss
The Kullback-Leibler divergence loss measure
`Kullback-Leibler divergence`_ is a useful distance measure for continuous
distributions and is often useful when performing direct regression over
the space of (discretely sampled) continuous output distributions.
As with :class:`~torch.nn.NLLLoss`, the `input` given is expected to contain
*log-probabilities* and is not restricted to a 2D Tensor.
The targets are interpreted as *probabilities* by default, but could be considered
as *log-probabilities* with :attr:`log_target` set to ``True``.
This criterion expects a `target` `Tensor` of the same size as the
`input` `Tensor`.
The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
.. math::
l(x,y) = L = \\{ l_1,\\dots,l_N \\}, \\quad
l_n = y_n \\cdot \\left( \\log y_n - x_n \\right)
where the index :math:`N` spans all dimensions of ``input`` and :math:`L` has the same
shape as ``input``. If :attr:`reduction` is not ``'none'`` (default ``'mean'``), then:
.. math::
\\ell(x, y) = \\begin{cases}
\\operatorname{mean}(L), & \\text{if reduction} = \\text{`mean';} \\\\
\\operatorname{sum}(L), & \\text{if reduction} = \\text{`sum'.}
\\end{cases}
In default :attr:`reduction` mode ``'mean'``, the losses are averaged for each minibatch over observations
**as well as** over dimensions. ``'batchmean'`` mode gives the correct KL divergence where losses
are averaged over batch dimension only. ``'mean'`` mode's behavior will be changed to the same as
``'batchmean'`` in the next major release.
.. _`kullback-leibler divergence`: https://en.wikipedia.org/wiki/Kullback-Leibler_divergence
Args:
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``.
``'none'``: no reduction will be applied.
``'batchmean'``: the sum of the output will be divided by batchsize.
``'sum'``: the output will be summed.
``'mean'``: the output will be divided by the number of elements in the output.
Default: ``'mean'``
log_target (bool, optional): Specifies whether `target` is passed in the log space.
Default: ``False``
.. note::
:attr:`reduction` = ``'mean'`` doesn't return the true kl divergence value, please use
:attr:`reduction` = ``'batchmean'`` which aligns with KL math definition.
In the next major release, ``'mean'`` will be changed to be the same as ``'batchmean'``.
Shape:
- Input: :math:`(N, *)` where :math:`*` means, any number of additional
dimensions
- Target: :math:`(N, *)`, same shape as the input
- Output: scalar by default. If :attr:``reduction`` is ``'none'``, then :math:`(N, *)`,
the same shape as the input
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor([-0.9021705, 0.08798598, 1.04686249], dtype=flow.float32)
>>> target = flow.tensor([1.22386942, -0.89729659, 0.01615712], dtype=flow.float32)
>>> m = flow.nn.KLDivLoss(reduction="none", log_target=False)
>>> out = m(input, target)
>>> out
tensor([ 1.3514, 0.0000, -0.0836], dtype=oneflow.float32)
>>> m = flow.nn.KLDivLoss(reduction="mean", log_target=False)
>>> out = m(input, target)
>>> out
tensor(0.4226, dtype=oneflow.float32)
>>> m = flow.nn.KLDivLoss(reduction="sum", log_target=True)
>>> out = m(input, target)
>>> out
tensor(5.7801, dtype=oneflow.float32)
"""
def __init__(self, reduction: str = "mean", log_target: bool = False) -> None:
super(KLDivLoss, self).__init__(reduction)
self.log_target = log_target
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return flow._C.kl_div_loss(input, target, self.log_target, self.reduction)
class MSELoss(_Loss):
"""The interface is consistent with PyTorch.
The documentation is referenced from:
https://pytorch.org/docs/stable/generated/torch.nn.MSELoss.html?highlight=mseloss#torch.nn.MSELoss
Creates a criterion that measures the mean squared error (squared L2 norm) between
each element in the input :math:`x` and target :math:`y`.
The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
.. math::
\\ell(x, y) = L = \\{l_1,\\dots,l_N\\}^\\top, \\quad
l_n = \\left( x_n - y_n \\right)^2,
where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
(default ``'mean'``), then:
.. math::
\\ell(x, y) =
\\begin{cases}
\\operatorname{mean}(L), & \\text{if reduction} = \\text{`mean';}\\\\
\\operatorname{sum}(L), & \\text{if reduction} = \\text{`sum'.}
\\end{cases}
:math:`x` and :math:`y` are tensors of arbitrary shapes with a total
of :math:`n` elements each.
The mean operation still operates over all the elements, and divides by :math:`n`.
The division by :math:`n` can be avoided if one sets ``reduction = 'sum'``.
Args:
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
Shape:
- Input: :math:`(N, *)` where :math:`*` means, any number of additional
dimensions
- Target: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor(
... [[-0.02557137, 0.03101675, 1.37493674],
... [0.25599439, -1.08372561, -0.21006816]], dtype=flow.float32)
>>> target = flow.tensor(
... [[-1.53105064, -0.68137555, 0.5931354],
... [-0.49158347, 0.93673637, 0.1324141]], dtype=flow.float32)
>>> m = flow.nn.MSELoss(reduction="none")
>>> out = m(input, target)
>>> out
tensor([[2.2665, 0.5075, 0.6112],
[0.5589, 4.0823, 0.1173]], dtype=oneflow.float32)
>>> m = flow.nn.MSELoss(reduction="mean")
>>> out = m(input, target)
>>> out
tensor(1.3573, dtype=oneflow.float32)
>>> m = flow.nn.MSELoss(reduction="sum")
>>> out = m(input, target)
>>> out
tensor(8.1436, dtype=oneflow.float32)
"""
def __init__(self, reduction: str = "mean") -> None:
super(MSELoss, self).__init__(reduction)
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return flow._C.mse_loss(input, target, self.reduction)
class MarginRankingLoss(_Loss):
"""Creates a criterion that measures the loss given
inputs :math:`x1`, :math:`x2`, two 1D mini-batch `Tensors`,
and a label 1D mini-batch tensor :math:`y` (containing 1 or -1).
If :math:`y = 1` then it assumed the first input should be ranked higher
(have a larger value) than the second input, and vice-versa for :math:`y = -1`.
The loss function for each sample in the mini-batch is:
.. math::
\\text{loss}(x1, x2, y) = \\max(0, -y * (x1 - x2) + \\text{margin})
Args:
margin (float, optional): Has a default value of :math:`0`.
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
Shape:
- `x1` : :math:`(N, D)` where `N` is the batch size and `D` is the size of a sample.
- `x2` : :math:`(N, D)` where `N` is the batch size and `D` is the size of a sample.
- Target: :math:`(N)`
- Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N)`.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> x1 = flow.tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=flow.float32)
>>> x2 = flow.tensor(np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]), dtype=flow.float32)
>>> target = flow.tensor(np.array([[1, -1, 1],[-1, 1, -1], [1, 1, 1]]), dtype=flow.float32)
>>> m = flow.nn.MarginRankingLoss(margin =1.0, reduction="none")
>>> out = m(x1, x2, target)
>>> out
tensor([[2., 1., 0.],
[3., 0., 5.],
[0., 0., 0.]], dtype=oneflow.float32)
>>> m = flow.nn.MarginRankingLoss(margin = 0.3, reduction="sum")
>>> out = m(x1, x2, target)
>>> out
tensor(8.2000, dtype=oneflow.float32)
>>> m = flow.nn.MarginRankingLoss(margin = 10, reduction="mean")
>>> out = m(x1, x2, target)
>>> out
tensor(8.3333, dtype=oneflow.float32)
"""
def __init__(self, margin: float = 0.0, reduction: str = "mean") -> None:
super(MarginRankingLoss, self).__init__(reduction)
self.margin = margin
def forward(self, input1: Tensor, input2: Tensor, target: Tensor) -> Tensor:
return flow._C.margin_ranking_loss(
input1, input2, target, self.margin, self.reduction
)
class CTCLoss(_Loss):
"""The Connectionist Temporal Classification loss.
The interface is consistent with PyTorch.
The documentation is referenced from:
https://pytorch.org/docs/stable/generated/torch.nn.CTCLoss.html#torch.nn.CTCLoss
Calculates loss between a continuous (unsegmented) time series and a target sequence. CTCLoss sums over the
probability of possible alignments of input to target, producing a loss value which is differentiable
with respect to each input node. The alignment of input to target is assumed to be "many-to-one", which
limits the length of the target sequence such that it must be :math:`\\leq` the input length.
Args:
blank (int, optional): blank label. Default :math:`0`.
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the output losses will be divided by the target lengths and
then the mean over the batch is taken. Default: ``'mean'``
zero_infinity (bool, optional):
Whether to zero infinite losses and the associated gradients.
Default: ``False``
Infinite losses mainly occur when the inputs are too short
to be aligned to the targets.
Shape:
- Log_probs: Tensor of size :math:`(T, N, C)`,
where :math:`T = \\text{input length}`,
:math:`N = \\text{batch size}`, and
:math:`C = \\text{number of classes (including blank)}`.
- Targets: Tensor of size :math:`(N, S)` or
:math:`(\\operatorname{sum}(\\text{target_lengths}))`,
where :math:`N = \\text{batch size}` and
:math:`S = \\text{max target length, if shape is } (N, S)`.
It represent the target sequences. Each element in the target
sequence is a class index. And the target index cannot be blank (default=0).
In the :math:`(N, S)` form, targets are padded to the
length of the longest sequence, and stacked.
In the :math:`(\\operatorname{sum}(\\text{target_lengths}))` form,
the targets are assumed to be un-padded and
concatenated within 1 dimension.
- Input_lengths: Tuple or tensor of size :math:`(N)`,
where :math:`N = \\text{batch size}`. It represent the lengths of the
inputs (must each be :math:`\\leq T`). And the lengths are specified
for each sequence to achieve masking under the assumption that sequences
are padded to equal lengths.
- Target_lengths: Tuple or tensor of size :math:`(N)`,
where :math:`N = \\text{batch size}`. It represent lengths of the targets.
Lengths are specified for each sequence to achieve masking under the
assumption that sequences are padded to equal lengths. If target shape is
:math:`(N,S)`, target_lengths are effectively the stop index
:math:`s_n` for each target sequence, such that ``target_n = targets[n,0:s_n]`` for
each target in a batch. Lengths must each be :math:`\\leq S`
If the targets are given as a 1d tensor that is the concatenation of individual
targets, the target_lengths must add up to the total length of the tensor.
Reference:
A. Graves et al.: Connectionist Temporal Classification:
Labelling Unsegmented Sequence Data with Recurrent Neural Networks:
https://www.cs.toronto.edu/~graves/icml_2006.pdf
For example:
.. code-block:: python
>>> import oneflow as flow
>>> log_probs = flow.tensor(
... [
... [[-1.1031, -0.7998, -1.5200], [-0.9808, -1.1363, -1.1908]],
... [[-1.2258, -1.0665, -1.0153], [-1.1135, -1.2331, -0.9671]],
... [[-1.3348, -0.6611, -1.5118], [-0.9823, -1.2355, -1.0941]],
... [[-1.3850, -1.3273, -0.7247], [-0.8235, -1.4783, -1.0994]],
... [[-0.9049, -0.8867, -1.6962], [-1.4938, -1.3630, -0.6547]],
... ], dtype=flow.float32)
>>> targets = flow.tensor([[1, 2, 2], [1, 2, 2]], dtype=flow.int32)
>>> input_lengths = flow.tensor([5, 5], dtype=flow.int32)
>>> target_lengths = flow.tensor([3, 3], dtype=flow.int32)
>>> loss_mean = flow.nn.CTCLoss()
>>> out = loss_mean(log_probs, targets, input_lengths, target_lengths)
>>> out
tensor(1.1376, dtype=oneflow.float32)
>>> loss_sum = flow.nn.CTCLoss(blank=0, reduction="sum")
>>> out = loss_sum(log_probs, targets, input_lengths, target_lengths)
>>> out
tensor(6.8257, dtype=oneflow.float32)
"""
def __init__(
self, blank: int = 0, reduction: str = "mean", zero_infinity: bool = False
) -> None:
super(CTCLoss, self).__init__(reduction)
self.blank = blank
self.zero_infinity = zero_infinity
def forward(
self,
log_probs: Tensor,
targets: Tensor,
input_lengths: Tensor,
target_lengths: Tensor,
) -> Tensor:
max_target_length = 0
if targets.ndim == 1:
max_target_length = target_lengths.max().item()
elif targets.ndim == 2:
max_target_length = targets.shape[1]
return flow._C.ctc_loss(
log_probs,
targets,
input_lengths,
target_lengths,
max_target_length,
self.blank,
self.zero_infinity,
self.reduction,
)
class BCEWithLogitsLoss(_WeightedLoss):
"""This operator combines the `Sigmoid` and `BCELoss` together. For numerical stability,
we apply some math tricks instead of using `Sigmoid` layer with `BCELoss`.
The equation is:
if :attr:`reduction` = ``"none"``:
.. math::
out = -weight*[Pos\\_weight*y*log\\sigma({x}) + (1-y)*log(1-\\sigma(x))]
if :attr:`reduction` = ``"mean"``:
.. math::
out = -\\frac{weight}{n}\\sum_{i=1}^n[Pos\\_weight*y*log\\sigma({x}) + (1-y)*log(1-\\sigma(x))]
if :attr:`reduction` = ``"sum"``:
.. math::
out = -weight*\\sum_{i=1}^n[Pos\\_weight*y*log\\sigma({x}) + (1-y)*log(1-\\sigma(x))]
Args:
weight (Tensor, optional): The manual rescaling weight to the loss. Default: ``None``
size_average (bool, optional): Deprecated (see :attr:`reduction`). Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). Default: ``True``
reduction (str, optional): The reduce type, it can be one of ``"none"``, ``"mean"``, ``"sum"``.
``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided
by the number of elements in the output, ``'sum'``: the output will be summed. Default: ``"mean"``
pos_weight (Tensor, optional): The manual rescaling weight to the positive examples.
Default: ``None``
Shape:
- Input: :math:`(N,*)` where `*` means, any number of additional dimensions
- Target: :math:`(N,*)`, same shape as the input
- Output: scalar. If :attr:`reduction` is ``"none"``, then :math:`(N,*)`, same shape as input.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> input = flow.tensor([[1.2, 0.2, -0.3], [0.7, 0.6, -2], [0.7, 0.6, -2]], dtype=flow.float32)
>>> target = flow.tensor([[0, 1, 0], [1, 0, 1], [1, 0, 1]], dtype=flow.float32)
>>> weight = flow.tensor([[2, 2, 2], [2, 2, 2], [2, 2, 2]], dtype=flow.float32)
>>> pos_weight = flow.tensor([1.2, 1.3, 1.4], dtype=flow.float32)
>>> m = flow.nn.BCEWithLogitsLoss(weight=weight, pos_weight=pos_weight, reduction="none")
>>> out = m(input, target)
>>> out
tensor([[2.9266, 1.5552, 1.1087],
[0.9676, 2.0750, 5.9554],
[0.9676, 2.0750, 5.9554]], dtype=oneflow.float32)
>>> m = flow.nn.BCEWithLogitsLoss(weight=weight, pos_weight=pos_weight, reduction="mean")
>>> out = m(input, target)
>>> out
tensor(2.6207, dtype=oneflow.float32)
>>> m = flow.nn.BCEWithLogitsLoss(weight=weight, pos_weight=pos_weight, reduction="sum")
>>> out = m(input, target)
>>> out
tensor(23.5865, dtype=oneflow.float32)
"""
def __init__(
self,
weight: Optional[Tensor] = None,
reduction: str = "mean",
pos_weight: Optional[Tensor] = None,
) -> None:
super(BCEWithLogitsLoss, self).__init__(weight, reduction)
self.reduction = reduction
self.pos_weight = pos_weight
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return flow._C.binary_cross_entropy_with_logits_loss(
input, target, self.weight, self.pos_weight, self.reduction
)
class SmoothL1Loss(_Loss):
"""Creates a criterion that uses a squared term if the absolute
element-wise error falls below beta and an L1 term otherwise.
The interface is consistent with PyTorch.
The documentation is referenced from:
https://pytorch.org/docs/stable/generated/torch.nn.SmoothL1Loss.html
It is less sensitive to outliers than :class:`torch.nn.MSELoss` and in some cases
    prevents exploding gradients (e.g. see the paper `Fast R-CNN <https://openaccess.thecvf.com/content_iccv_2015/papers/Girshick_Fast_R-CNN_ICCV_2015_paper.pdf>`__ by Ross Girshick).
For a batch of size :math:`N`, the unreduced loss can be described as:
.. math::
\\ell(x, y) = L = \\{l_1, ..., l_N\\}^T
with
.. math::
l_n = \\begin{cases}
0.5 (x_n - y_n)^2 / beta, & \\text{if } |x_n - y_n| < beta \\\\
|x_n - y_n| - 0.5 * beta, & \\text{otherwise }
\\end{cases}
If `reduction` is not `none`, then:
.. math::
\\ell(x, y) =
\\begin{cases}
\\operatorname{mean}(L), & \\text{if reduction} = \\text{`mean';}\\\\
\\operatorname{sum}(L), & \\text{if reduction} = \\text{`sum'.}
\\end{cases}
.. note::
Smooth L1 loss can be seen as exactly :class:`L1Loss`, but with the :math:`|x - y| < beta`
portion replaced with a quadratic function such that its slope is 1 at :math:`|x - y| = beta`.
The quadratic segment smooths the L1 loss near :math:`|x - y| = 0`.
.. note::
Smooth L1 loss is closely related to :class:`HuberLoss`, being
equivalent to :math:`huber(x, y) / beta` (note that Smooth L1's beta hyper-parameter is
also known as delta for Huber). This leads to the following differences:
* As beta -> 0, Smooth L1 loss converges to :class:`L1Loss`, while :class:`HuberLoss`
converges to a constant 0 loss.
* As beta -> :math:`+\\infty`, Smooth L1 loss converges to a constant 0 loss, while
:class:`HuberLoss` converges to :class:`MSELoss`.
* For Smooth L1 loss, as beta varies, the L1 segment of the loss has a constant slope of 1.
For :class:`HuberLoss`, the slope of the L1 segment is beta.
Args:
size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
the losses are averaged over each loss element in the batch. Note that for
some losses, there are multiple elements per sample. If the field :attr:`size_average`
is set to ``False``, the losses are instead summed for each minibatch. Ignored
when :attr:`reduce` is ``False``. Default: ``True``
reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
losses are averaged or summed over observations for each minibatch depending
on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
batch element instead and ignores :attr:`size_average`. Default: ``True``
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
beta (float, optional): Specifies the threshold at which to change between L1 and L2 loss.
The value must be non-negative. Default: 1.0
Shape:
- Input: :math:`(N, *)` where :math:`*` means any number of additional dimensions
- Target: :math:`(N, *)`; same shape as the input
- Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N, *)`; same shape as the input
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> x = flow.tensor(np.array([0.1, 0.4, 0.3, 0.5, 0.9]).astype(np.float32), dtype=flow.float32)
>>> y = flow.tensor(np.array([0.3, 0.9, 2.5, 0.4, 0.3]).astype(np.float32), dtype=flow.float32)
>>> m = flow.nn.SmoothL1Loss(reduction="none")
>>> out = m(x, y)
>>> out
tensor([0.0200, 0.1250, 1.7000, 0.0050, 0.1800], dtype=oneflow.float32)
>>> m = flow.nn.SmoothL1Loss(reduction="mean")
>>> out = m(x, y)
>>> out
tensor(0.4060, dtype=oneflow.float32)
>>> m = flow.nn.SmoothL1Loss(reduction="sum")
>>> out = m(x, y)
>>> out
tensor(2.0300, dtype=oneflow.float32)
"""
def __init__(self, reduction: str = "mean", beta: float = 1.0) -> None:
super(SmoothL1Loss, self).__init__(reduction)
self.beta = beta
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return flow._C.smooth_l1_loss(input, target, self.beta, self.reduction)
class CombinedMarginLoss(Module):
"""The operation implements "margin_softmax" in InsightFace:
https://github.com/deepinsight/insightface/blob/master/recognition/arcface_mxnet/train.py
The implementation of margin_softmax in InsightFace is composed of multiple operators.
We fuse them for speed up.
Args:
x (oneflow.Tensor): A Tensor
label (oneflow.Tensor): label with integer data type
m1 (float): loss m1 parameter
m2 (float): loss m2 parameter
m3 (float): loss m3 parameter
Returns:
oneflow.Tensor: A Tensor
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> np_x = np.array([[-0.7027179, 0.0230609], [-0.02721931, -0.16056311], [-0.4565852, -0.64471215]])
>>> np_label = np.array([0, 1, 1])
>>> x = flow.tensor(np_x, dtype=flow.float32)
>>> label = flow.tensor(np_label, dtype=flow.int32)
>>> loss_func = flow.nn.CombinedMarginLoss(0.3, 0.5, 0.4)
>>> out = loss_func(x, label)
>>> out
tensor([[-0.0423, 0.0231],
[-0.0272, 0.1237],
[-0.4566, -0.0204]], dtype=oneflow.float32)
"""
def __init__(self, m1: float = 1.0, m2: float = 0.0, m3: float = 0.0) -> None:
super().__init__()
self.m1 = m1
self.m2 = m2
self.m3 = m3
def forward(self, x: Tensor, label: Tensor) -> Tensor:
return flow._C.combined_margin_loss(
x, label, m1=self.m1, m2=self.m2, m3=self.m3
)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| 39.794433 | 184 | 0.591692 |
| 4a19139f67bb51c91164b0ab2d8f557151f0df03 | 6,727 | py | Python | center_out_task/matdatainterface.py | catalystneuro/shenoy-lab-to-nwb | 29dbcf94e98dcdf4b5f079e906742af0603e958e | ["MIT"] | null | null | null | center_out_task/matdatainterface.py | catalystneuro/shenoy-lab-to-nwb | 29dbcf94e98dcdf4b5f079e906742af0603e958e | ["MIT"] | null | null | null | center_out_task/matdatainterface.py | catalystneuro/shenoy-lab-to-nwb | 29dbcf94e98dcdf4b5f079e906742af0603e958e | ["MIT"] | null | null | null |
from pathlib import Path
from typing import Union
import numpy as np
from nwb_conversion_tools.basedatainterface import BaseDataInterface
from nwb_conversion_tools.utils.json_schema import (
get_base_schema,
get_schema_from_hdmf_class,
get_schema_for_NWBFile,
get_schema_from_method_signature,
)
from pynwb import NWBFile, TimeSeries
from pynwb.base import DynamicTable
from pynwb.behavior import Position, SpatialSeries
from pynwb.epoch import TimeIntervals
from pynwb.misc import Units
from .matextractor import MatDataExtractor
PathType = Union[str, Path]
class COutMatDataInterface(BaseDataInterface):
def __init__(self, filename: PathType):
super().__init__()
self.file_path = Path(filename)
assert self.file_path.suffix == ".mat", "file_path should be a .mat"
assert self.file_path.exists(), "file_path does not exist"
self.mat_extractor = MatDataExtractor(self.file_path)
@classmethod
def get_source_schema(cls):
return get_schema_from_method_signature(cls.__init__)
@staticmethod
def _convert_schema_object_to_array(schema_to_convert):
base_schema = get_base_schema()
base_schema.update(type="array")
_ = base_schema.pop("properties")
base_schema["items"] = schema_to_convert
return base_schema
def get_metadata_schema(self):
metadata_schema = get_base_schema()
metadata_schema["required"] = [
"Behavior",
"Intervals",
"Units",
"Subject",
"NWBFile",
]
metadata_schema["properties"] = dict()
metadata_schema["properties"]["Behavior"] = get_base_schema()
metadata_schema["properties"]["NWBFile"] = get_schema_for_NWBFile()
metadata_schema["properties"]["Intervals"] = get_schema_from_hdmf_class(
TimeIntervals
)
dt_schema = get_base_schema(DynamicTable)
dt_schema["additionalProperties"] = True
metadata_schema["properties"]["Behavior"]["properties"] = dict(
Position=self._convert_schema_object_to_array(
get_schema_from_hdmf_class(SpatialSeries)
),
)
units_schema = get_schema_from_hdmf_class(Units)
units_schema["additionalProperties"] = True
metadata_schema["properties"]["Units"] = units_schema
return metadata_schema
def get_metadata(self):
metadata = dict(
Subject=dict(
sex="M",
species="Macaca mulatta",
subject_id=self.mat_extractor.subject_name,
),
NWBFile=dict(session_start_time=str(self.mat_extractor.session_start)),
Behavior=dict(
Position=[
dict(name="Eye", reference_frame="screen center"),
dict(name="Hand", reference_frame="screen center"),
dict(name="Cursor", reference_frame="screen center"),
]
),
Intervals=dict(name="trials"),
Units=dict(name="units"),
)
return metadata
def run_conversion(self, nwbfile: NWBFile, metadata: dict, **kwargs):
assert isinstance(nwbfile, NWBFile), "'nwbfile' should be of type pynwb.NWBFile"
beh_pos = self.mat_extractor.extract_behavioral_position()
stim_pos = self.mat_extractor.extract_stimulus()
trial_times = self.mat_extractor.get_trial_times()
trial_times_all = np.concatenate(trial_times)
task_data = self.mat_extractor.extract_task_data()
task_times_data = self.mat_extractor.extract_task_times()
spike_times = self.mat_extractor.extract_unit_spike_times()
# add behavior:
beh_mod = nwbfile.create_processing_module(
"behavior", "contains monkey movement data"
)
position_container = Position()
spatial_series_list = []
for beh in beh_pos:
args = dict(
timestamps=trial_times_all,
reference_frame="screen center",
conversion=np.nan,
)
spatial_series_list.append(
position_container.create_spatial_series(**beh, **args)
)
beh_mod.add(position_container)
# add stimulus:
nwbfile.add_stimulus(
TimeSeries(
name="juice_reward",
description="1 is when reward was presented",
data=stim_pos,
timestamps=trial_times_all,
unit="n.a.",
)
)
# add trials:
for col_details in task_data + task_times_data:
col_det = dict(
name=col_details["name"], description=col_details["description"]
)
if "index" in col_details:
col_det.update(index=col_details["index"])
nwbfile.add_trial_column(**col_det)
for trial_no in range(self.mat_extractor._no_trials):
col_details_dict = {
i["name"]: i["data"][trial_no] for i in task_data + task_times_data
}
col_details_dict.update(
start_time=trial_times[trial_no][0],
stop_time=trial_times[trial_no][-1],
timeseries=spatial_series_list,
)
nwbfile.add_trial(**col_details_dict)
if len(nwbfile.devices) == 0:
nwbfile.create_device(
name="Utah Electrode", description="192 channels microelectrode array"
)
if len(nwbfile.electrode_groups) == 0:
            # add electrode groups:
nwbfile.create_electrode_group(
name="1",
description="array corresponding to device implanted at PMd",
location="Caudal, dorsal Pre-motor cortex, Left hemisphere",
device=nwbfile.devices["Utah Electrode"],
)
nwbfile.create_electrode_group(
name="2",
description="array corresponding to device implanted at M1",
location="M1 in Motor Cortex, left hemisphere",
device=nwbfile.devices["Utah Electrode"],
)
# add units:
for no, unit_sp_times in enumerate(spike_times):
elec_group = 1 if no > 95 else 0
nwbfile.add_unit(
spike_times=unit_sp_times,
electrodes=[no],
electrode_group=list(nwbfile.electrode_groups.values())[elec_group],
obs_intervals=np.array([trial_times[0][0], trial_times[-1][-1]])[
np.newaxis, :
],
)
| 38.44 | 88 | 0.603687 |
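A hedged sketch of driving the data interface above end to end; the .mat path, session description, identifier, and output filename are all hypothetical, and it assumes the snippet runs where COutMatDataInterface (and its MatDataExtractor dependency) is importable, that pynwb is installed, and that the session-start string stored in the metadata parses with datetime.fromisoformat.

# Hypothetical conversion driver for the interface above.
from datetime import datetime
from pynwb import NWBFile, NWBHDF5IO

interface = COutMatDataInterface("center_out_session.mat")   # placeholder path
metadata = interface.get_metadata()

nwbfile = NWBFile(
    session_description="center-out reaching task",          # assumed description
    identifier="session-001",                                 # assumed identifier
    session_start_time=datetime.fromisoformat(metadata["NWBFile"]["session_start_time"]),
)
interface.run_conversion(nwbfile=nwbfile, metadata=metadata)

with NWBHDF5IO("center_out_session.nwb", "w") as io:          # placeholder output file
    io.write(nwbfile)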
| 4a1913a08b980cfdbba5a48a14fdfc2c62285cdc | 792 | py | Python | tests/test_repeated_cookie_headers.py | Aryabhata-Rootspring/fastapi | f6237ad05a8468ac19c591181adad38d75372c46 | ["MIT"] | 53,007 | 2018-12-08T10:05:29.000Z | 2022-03-31T23:30:02.000Z | tests/test_repeated_cookie_headers.py | Aryabhata-Rootspring/fastapi | f6237ad05a8468ac19c591181adad38d75372c46 | ["MIT"] | 4,155 | 2019-01-05T05:07:49.000Z | 2022-03-31T21:25:38.000Z | tests/test_repeated_cookie_headers.py | Aryabhata-Rootspring/fastapi | f6237ad05a8468ac19c591181adad38d75372c46 | ["MIT"] | 4,092 | 2018-12-09T16:21:00.000Z | 2022-03-31T07:59:45.000Z |
from fastapi import Depends, FastAPI, Response
from fastapi.testclient import TestClient
app = FastAPI()
def set_cookie(*, response: Response):
response.set_cookie("cookie-name", "cookie-value")
return {}
def set_indirect_cookie(*, dep: str = Depends(set_cookie)):
return dep
@app.get("/directCookie")
def get_direct_cookie(dep: str = Depends(set_cookie)):
return {"dep": dep}
@app.get("/indirectCookie")
def get_indirect_cookie(dep: str = Depends(set_indirect_cookie)):
return {"dep": dep}
client = TestClient(app)
def test_cookie_is_set_once():
direct_response = client.get("/directCookie")
indirect_response = client.get("/indirectCookie")
assert (
direct_response.headers["set-cookie"] == indirect_response.headers["set-cookie"]
)
| 22.628571 | 88 | 0.712121 |
| 4a1913a28629cb6092edda4a12f35ab500d43794 | 4,703 | py | Python | nautobot/extras/plugins/utils.py | susanhooks/nautobot | bc3ef5958f0d5decb0be763342c790f26ff1e20e | ["Apache-2.0"] | null | null | null | nautobot/extras/plugins/utils.py | susanhooks/nautobot | bc3ef5958f0d5decb0be763342c790f26ff1e20e | ["Apache-2.0"] | null | null | null | nautobot/extras/plugins/utils.py | susanhooks/nautobot | bc3ef5958f0d5decb0be763342c790f26ff1e20e | ["Apache-2.0"] | null | null | null |
"""
Plugin utilities.
"""
import importlib.util
import logging
import sys
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import import_string
from .exceptions import PluginNotFound, PluginImproperlyConfigured
# Logging object
logger = logging.getLogger("nautobot.plugins")
def import_object(module_and_object):
"""
Import a specific object from a specific module by name, such as "nautobot.extras.plugins.utils.import_object".
Returns the imported object, or None if it doesn't exist.
"""
target_module_name, object_name = module_and_object.rsplit(".", 1)
module_hierarchy = target_module_name.split(".")
# Iterate through the module hierarchy, checking for the existence of each successive submodule.
# We have to do this rather than jumping directly to calling find_spec(target_module_name)
# because find_spec will raise a ModuleNotFoundError if any parent module of target_module_name does not exist.
module_name = ""
for module_component in module_hierarchy:
module_name = f"{module_name}.{module_component}" if module_name else module_component
spec = importlib.util.find_spec(module_name)
if spec is None:
# No such module
return None
# Okay, target_module_name exists. Load it if not already loaded
if target_module_name in sys.modules:
module = sys.modules[target_module_name]
else:
module = importlib.util.module_from_spec(spec)
sys.modules[target_module_name] = module
spec.loader.exec_module(module)
return getattr(module, object_name, None)
def load_plugins(settings):
"""Process plugins and log errors if they can't be loaded."""
for plugin_name in settings.PLUGINS:
# Attempt to load the plugin but let any errors bubble up.
load_plugin(plugin_name, settings)
def load_plugin(plugin_name, settings):
"""Process a single plugin or raise errors that get bubbled up."""
logger.debug(f"Loading {plugin_name}!")
# Import plugin module
try:
plugin = importlib.import_module(plugin_name)
except ModuleNotFoundError as err:
if getattr(err, "name") == plugin_name:
raise PluginNotFound(
f"Unable to import plugin {plugin_name}: Module not found. Check that the plugin module has been "
f"installed within the correct Python environment."
) from err
raise err
# Validate plugin config
try:
plugin_config = plugin.config
except AttributeError as err:
raise PluginImproperlyConfigured(
f"Plugin {plugin_name} does not provide a 'config' variable. This should be defined in the plugin's "
f"__init__.py file and point to the PluginConfig subclass."
) from err
# Validate user-provided configuration settings and assign defaults. Plugin
# validation that fails will stop before modifying any settings.
if plugin_name not in settings.PLUGINS_CONFIG:
settings.PLUGINS_CONFIG[plugin_name] = {}
plugin_config.validate(settings.PLUGINS_CONFIG[plugin_name], settings.VERSION)
    # Plugin config is valid, so now we can add it to INSTALLED_APPS.
plugin_import_path = f"{plugin_config.__module__}.{plugin_config.__name__}"
if plugin_import_path not in settings.INSTALLED_APPS:
settings.INSTALLED_APPS.append(plugin_import_path)
# Include any extra installed apps provided by the plugin
# TODO(jathan): We won't be able to support advanced app-ordering concerns
# and if the time comes that we do, this will have to be rethought.
for plugin_installed_app in plugin_config.installed_apps:
if plugin_installed_app not in settings.INSTALLED_APPS:
settings.INSTALLED_APPS.append(plugin_installed_app)
# Include any extra middleware provided by the plugin
for middleware in plugin_config.middleware:
if middleware not in settings.MIDDLEWARE:
settings.MIDDLEWARE.append(middleware)
    # Update caching config
settings.CACHEOPS.update({f"{plugin_name}.{key}": value for key, value in plugin_config.caching_config.items()})
def get_sso_backend_name(social_auth_module):
"""
Return the name parameter of the social auth module defined in the module itself.
:param social_auth_module: The social auth python module to read the name parameter from
"""
try:
backend_class = import_string(social_auth_module)
except ImportError:
raise ImproperlyConfigured(f"Unable to import Social Auth Module {social_auth_module}.")
backend_name = backend_class.name
return backend_name
| 38.867769
| 116
| 0.724856
|
4a1913af16afa1c7cea2146b8fe72c8a0a14de25
| 837
|
py
|
Python
|
notes/migrations/0005_auto_20200314_1006.py
|
namratavalecha/notesea
|
5fb2133c0744bed45dfa106c428581035185f7db
|
[
"MIT"
] | null | null | null |
notes/migrations/0005_auto_20200314_1006.py
|
namratavalecha/notesea
|
5fb2133c0744bed45dfa106c428581035185f7db
|
[
"MIT"
] | 7
|
2021-03-19T01:00:25.000Z
|
2022-03-12T00:20:35.000Z
|
notes/migrations/0005_auto_20200314_1006.py
|
namratavalecha/notesea
|
5fb2133c0744bed45dfa106c428581035185f7db
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.4 on 2020-03-14 10:06
from django.db import migrations, models
import django.db.models.deletion
import notes.models
class Migration(migrations.Migration):
dependencies = [
('notes', '0004_auto_20200314_0952'),
]
operations = [
migrations.RemoveField(
model_name='note',
name='image',
),
migrations.CreateModel(
name='Images',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to=notes.models.get_image_filename, verbose_name='Image')),
('note', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='notes.Note')),
],
),
]
| 29.892857
| 120
| 0.608124
|
4a191505655a8d4c57e7bb6429b6e767fdda9d6e
| 9,138
|
py
|
Python
|
node-transformer-deprecated/model_process.py
|
mandubian/pytorch-neural-ode
|
2d8a6e3a51b7446188ef4851c0d6620f603c9b72
|
[
"Apache-2.0"
] | 6
|
2020-09-01T13:50:42.000Z
|
2022-02-03T09:13:46.000Z
|
node-transformer-deprecated/model_process.py
|
mandubian/pytorch-neural-ode
|
2d8a6e3a51b7446188ef4851c0d6620f603c9b72
|
[
"Apache-2.0"
] | null | null | null |
node-transformer-deprecated/model_process.py
|
mandubian/pytorch-neural-ode
|
2d8a6e3a51b7446188ef4851c0d6620f603c9b72
|
[
"Apache-2.0"
] | 1
|
2021-10-29T18:30:03.000Z
|
2021-10-29T18:30:03.000Z
|
import time
import math
from tqdm import tqdm #tqdm_notebook as tqdm
import numpy as np
import torch
from torch.utils import data
import torch.nn.functional as F
from transformer import Constants
#from transformer.Translator import Translator
from NodeTranslator import NodeTranslator
from loss import compute_performance
from checkpoints import rotating_save_checkpoint, build_checkpoint
from progress_bar import ProgressBar
def train_epoch(model, training_data, timesteps, optimizer, device, epoch, pb, tb=None, log_interval=100):
model.train()
total_loss = 0
n_word_total = 0
n_word_correct = 0
model.reset_nfes()
#for batch_idx, batch in enumerate(tqdm(training_data, mininterval=2, leave=False)):
for batch_idx, batch in enumerate(training_data):
batch_qs, batch_qs_pos, batch_as, batch_as_pos = map(lambda x: x.to(device), batch)
gold_as = batch_as[:, 1:]
optimizer.zero_grad()
pred_as = model(batch_qs, batch_qs_pos, batch_as, batch_as_pos, timesteps)
loss, n_correct = compute_performance(pred_as, gold_as, smoothing=True)
loss.backward()
# update parameters
optimizer.step()
# note keeping
total_loss += loss.item()
non_pad_mask = gold_as.ne(Constants.PAD)
n_word = non_pad_mask.sum().item()
n_word_total += n_word
n_word_correct += n_correct
if tb is not None and batch_idx % log_interval == 0:
tb.add_scalars(
{
"loss_per_word" : total_loss / n_word_total,
"accuracy" : n_word_correct / n_word_total,
"nfe_encoder": model.nfes[0],
"nfe_decoder": model.nfes[1],
},
group="train",
sub_group="batch",
global_step=epoch * len(training_data) + batch_idx
)
if pb is not None:
pb.training_step(
{
"train_loss": total_loss / n_word_total,
"train_accuracy": 100 * n_word_correct / n_word_total,
}
)
loss_per_word = total_loss / n_word_total
accuracy = n_word_correct / n_word_total
if tb is not None:
tb.add_scalars(
{
"loss_per_word" : loss_per_word,
"accuracy" : accuracy,
"nfe_encoder": model.nfes[0],
"nfe_decoder": model.nfes[1],
},
group="train",
sub_group="epoch",
global_step=epoch
)
return loss_per_word, accuracy
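# Hedged, standalone sketch of the PAD-masked bookkeeping used in the loops
# above, on toy tensors. Constants.PAD is assumed to be 0 for this
# illustration, and compute_performance itself is not reproduced here.
def _demo_pad_masked_counts():
    pad = 0
    gold = torch.tensor([[5, 7, 0, 0],
                         [3, 0, 0, 0]])
    pred = torch.tensor([[5, 2, 0, 0],
                         [3, 0, 0, 0]])
    non_pad_mask = gold.ne(pad)
    n_word = non_pad_mask.sum().item()                                   # 3 real tokens
    n_correct = pred.eq(gold).masked_select(non_pad_mask).sum().item()   # 2 of them match
    assert (n_word, n_correct) == (3, 2)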
def eval_epoch(model, validation_data, timesteps, device, epoch, tb=None, log_interval=100):
model.eval()
total_loss = 0
n_word_total = 0
n_word_correct = 0
with torch.no_grad():
#for batch_idx, batch in enumerate(tqdm(validation_data, mininterval=2, leave=False)):
for batch_idx, batch in enumerate(validation_data):
# prepare data
batch_qs, batch_qs_pos, batch_as, batch_as_pos = map(lambda x: x.to(device), batch)
gold_as = batch_as[:, 1:]
# forward
pred_as = model(batch_qs, batch_qs_pos, batch_as, batch_as_pos, timesteps)
loss, n_correct = compute_performance(pred_as, gold_as, smoothing=False)
# note keeping
total_loss += loss.item()
non_pad_mask = gold_as.ne(Constants.PAD)
n_word = non_pad_mask.sum().item()
n_word_total += n_word
n_word_correct += n_correct
loss_per_word = total_loss / n_word_total
accuracy = n_word_correct / n_word_total
if tb is not None:
tb.add_scalars(
{
"loss_per_word" : loss_per_word,
"accuracy" : accuracy,
},
group="eval",
sub_group="epoch",
global_step=epoch
)
return loss_per_word, accuracy
def train(exp_name, unique_id,
model, training_data, validation_data, timesteps,
optimizer, device, epochs,
tb=None, log_interval=100,
start_epoch=0, best_valid_accu=0.0, best_valid_loss=float('Inf'), checkpoint_desc={}):
model = model.to(device)
timesteps = timesteps.to(device)
print(f"Loaded model and timesteps to {device}")
pb = ProgressBar(
epochs,
len(training_data),
destroy_on_completed=False,
keys_to_plot=["train_loss", "valid_accu", "best_valid_loss", "best_valid_accu"],
)
for epoch_i in range(start_epoch, epochs):
pb.start_epoch(epoch_i)
print('[ Epoch', epoch_i, ']')
start = time.time()
train_loss, train_accu = train_epoch(model, training_data, timesteps, optimizer, device, epoch_i, pb, tb, log_interval)
print('[Training] loss: {train_loss}, ppl: {ppl: 8.5f}, accuracy: {accu:3.3f} %, '\
'elapse: {elapse:3.3f}ms'.format(
train_loss=train_loss, ppl=math.exp(min(train_loss, 100)), accu=100*train_accu,
elapse=(time.time()-start)*1000))
start = time.time()
valid_loss, valid_accu = eval_epoch(model, validation_data, timesteps, device, epoch_i, tb, log_interval)
print('[Validation] loss: {valid_loss}, ppl: {ppl: 8.5f}, accuracy: {accu:3.3f} %, '\
'elapse: {elapse:3.3f}ms'.format(
valid_loss=valid_loss, ppl=math.exp(min(valid_loss, 100)), accu=100*valid_accu,
elapse=(time.time()-start)*1000))
if valid_accu > best_valid_accu:
print("Checkpointing Validation Model...")
best_valid_accu = valid_accu
best_valid_loss = valid_loss
state = build_checkpoint(exp_name, unique_id, "validation", model, optimizer, best_valid_accu, best_valid_loss, epoch_i, checkpoint_desc)
rotating_save_checkpoint(state, prefix=f"{exp_name}_{unique_id}_validation", path="./checkpoints", nb=5)
pb.end_epoch(
{
"train_loss": train_loss, "train_accu": train_accu,
"valid_loss": valid_loss, "valid_accu": valid_accu,
"best_valid_loss": best_valid_loss, "best_valid_accu": best_valid_accu,
}
)
pb.close()
def predict(translator, data, timesteps, device, max_predictions=None):
if max_predictions is not None:
cur = max_predictions
else:
cur = len(data)
resps = []
for batch_idx, batch in enumerate(data):
if cur == 0:
break
batch_qs, batch_qs_pos = map(lambda x: x.to(device), batch)
all_hyp, all_scores = translator.translate_batch(batch_qs, batch_qs_pos, timesteps)
for i, idx_seqs in enumerate(all_hyp):
for j, idx_seq in enumerate(idx_seqs):
                # np_decode_string is expected from the repository's dataset utilities (not imported here).
                r = np_decode_string(np.array(idx_seq))
s = all_scores[i][j].cpu().item()
resps.append({"resp":r, "score":s})
cur -= 1
return resps
def predict_dataset(dataset, model, timesteps, device, callback, max_token_seq_len, max_batches=None,
beam_size=5, n_best=1,
batch_size=1, num_workers=1):
translator = NodeTranslator(model, device, beam_size=beam_size,
max_token_seq_len=max_token_seq_len, n_best=n_best)
if max_batches is not None:
cur = max_batches
else:
cur = len(dataset)
resps = []
for batch_idx, batch in enumerate(dataset):
if cur == 0:
break
batch_qs, batch_qs_pos, _, _ = map(lambda x: x.to(device), batch)
all_hyp, all_scores = translator.translate_batch(batch_qs, batch_qs_pos, timesteps)
callback(batch_idx, batch, all_hyp, all_scores)
cur -= 1
return resps
def predict_multiple(questions, model, timesteps, device, max_token_seq_len, beam_size=5,
                     n_best=1, batch_size=1,
                     num_workers=1):
    # np_encode_string and question_to_position_batch_collate_fn are expected to be
    # provided by the repository's dataset utilities; they are not imported in this module.
    questions = list(map(lambda q: np_encode_string(q), questions))
    questions = data.DataLoader(questions, batch_size=batch_size, shuffle=False,
                                num_workers=num_workers, collate_fn=question_to_position_batch_collate_fn)
    translator = NodeTranslator(model, device, beam_size=beam_size,
                                max_token_seq_len=max_token_seq_len, n_best=n_best)
    return predict(translator, questions, timesteps, device)
def predict_single(qs, qs_pos, model, timesteps, device, max_token_seq_len, beam_size=5,
n_best=1):
model = model.eval()
translator = NodeTranslator(model, device, beam_size=beam_size,
max_token_seq_len=max_token_seq_len, n_best=n_best)
qs, qs_pos = qs.to(device), qs_pos.to(device)
all_hyp, all_scores = translator.translate_batch(qs, qs_pos, timesteps)
resps = []
for i, idx_seqs in enumerate(all_hyp):
for j, idx_seq in enumerate(idx_seqs):
s = all_scores[i][j].cpu().item()
resps.append({"resp":np.array(idx_seq), "score":s})
return resps
| 34.483019
| 145
| 0.608995
|
4a1916012f64e47757d1a63e29b6e9cabaa0e2de
| 2,512
|
py
|
Python
|
pypesto/sample/util.py
|
stephanmg/pyPESTO
|
72488fbb3eaa91dd163f88bac71a1a165a0da70f
|
[
"BSD-3-Clause"
] | 97
|
2018-08-01T20:16:57.000Z
|
2022-03-31T18:46:28.000Z
|
pypesto/sample/util.py
|
Doresic/pyPESTO
|
d92bd7801353cd522fa8ec04a750aac120771f01
|
[
"BSD-3-Clause"
] | 758
|
2018-08-01T12:47:28.000Z
|
2022-03-30T21:00:26.000Z
|
pypesto/sample/util.py
|
Doresic/pyPESTO
|
d92bd7801353cd522fa8ec04a750aac120771f01
|
[
"BSD-3-Clause"
] | 36
|
2018-08-16T20:10:15.000Z
|
2022-03-17T16:58:11.000Z
|
"""A set of helper functions"""
import numpy as np
import logging
from typing import Tuple
from ..result import Result
from .diagnostics import geweke_test
logger = logging.getLogger(__name__)
def calculate_ci_mcmc_sample(
result: Result,
ci_level: float = 0.95,
exclude_burn_in: bool = True,
) -> Tuple[np.ndarray, np.ndarray]:
"""Calculate parameter credibility intervals based on MCMC samples.
Parameters
----------
result:
The pyPESTO result object with filled sample result.
ci_level:
        Lower tail probability, defaults to 95% interval.
    exclude_burn_in:
        Whether to exclude the burn-in samples (determined via a Geweke test
        if not yet computed) before calculating the interval.
Returns
-------
lb, ub:
Bounds of the MCMC percentile-based confidence interval.
"""
burn_in = 0
if exclude_burn_in:
# Check if burn in index is available
if result.sample_result.burn_in is None:
geweke_test(result)
# Get burn in index
burn_in = result.sample_result.burn_in
# Get converged parameter samples as numpy arrays
chain = np.asarray(result.sample_result.trace_x[0, burn_in:, :])
lb, ub = calculate_ci(chain, ci_level=ci_level, axis=0)
return lb, ub
def calculate_ci_mcmc_sample_prediction(
simulated_values: np.ndarray,
ci_level: float = 0.95,
) -> Tuple[np.ndarray, np.ndarray]:
"""Calculate prediction credibility intervals based on MCMC samples.
Parameters
----------
simulated_values:
Simulated model states or model observables.
ci_level:
Lower tail probability, defaults to 95% interval.
Returns
-------
lb, ub:
Bounds of the MCMC-based prediction confidence interval.
"""
lb, ub = calculate_ci(simulated_values, ci_level=ci_level, axis=1)
return lb, ub
def calculate_ci(
values: np.ndarray,
ci_level: float,
**kwargs,
) -> Tuple[np.ndarray, np.ndarray]:
"""Calculate confidence/credibility levels using percentiles.
Parameters
----------
values:
The values used to calculate percentiles.
ci_level:
Lower tail probability.
kwargs:
Additional keyword arguments are passed to the `numpy.percentile` call.
Returns
-------
lb, ub:
Bounds of the confidence/credibility interval.
"""
# Percentile values corresponding to the CI level
percentiles = 100 * np.array([(1-ci_level)/2, 1-(1-ci_level)/2])
# Upper and lower bounds
lb, ub = np.percentile(values, percentiles, **kwargs)
return lb, ub
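# Hedged, standalone usage sketch (not part of pyPESTO's API): the same
# percentile computation as calculate_ci, run on a made-up Gaussian sample so
# the maths above can be checked in isolation.
def _demo_percentile_ci():
    rng = np.random.default_rng(0)
    samples = rng.normal(loc=1.0, scale=2.0, size=(10_000, 3))  # draws x parameters
    lb, ub = calculate_ci(samples, ci_level=0.95, axis=0)
    # One bound per parameter column, bracketing roughly mean +/- 1.96 * std.
    assert lb.shape == ub.shape == (3,)
    return lb, ub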
| 26.442105
| 79
| 0.650478
|
4a19171c5d6464c6bfb983ed74f5bcfdda6f06fc
| 7,621
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/containerregistry/v20190601preview/task_run.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/containerregistry/v20190601preview/task_run.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/containerregistry/v20190601preview/task_run.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['TaskRun']
class TaskRun(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
force_update_tag: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityPropertiesArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
registry_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
run_request: Optional[pulumi.Input[Union[pulumi.InputType['DockerBuildRequestArgs'], pulumi.InputType['EncodedTaskRunRequestArgs'], pulumi.InputType['FileTaskRunRequestArgs'], pulumi.InputType['TaskRunRequestArgs']]]] = None,
task_run_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
The task run that has the ARM resource and properties.
The task run will have the information of request and result of a run.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] force_update_tag: How the run should be forced to rerun even if the run request configuration has not changed
:param pulumi.Input[pulumi.InputType['IdentityPropertiesArgs']] identity: Identity for the resource.
:param pulumi.Input[str] location: The location of the resource
:param pulumi.Input[str] registry_name: The name of the container registry.
:param pulumi.Input[str] resource_group_name: The name of the resource group to which the container registry belongs.
:param pulumi.Input[Union[pulumi.InputType['DockerBuildRequestArgs'], pulumi.InputType['EncodedTaskRunRequestArgs'], pulumi.InputType['FileTaskRunRequestArgs'], pulumi.InputType['TaskRunRequestArgs']]] run_request: The request (parameters) for the run
:param pulumi.Input[str] task_run_name: The name of the task run.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['force_update_tag'] = force_update_tag
__props__['identity'] = identity
__props__['location'] = location
if registry_name is None and not opts.urn:
raise TypeError("Missing required property 'registry_name'")
__props__['registry_name'] = registry_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['run_request'] = run_request
__props__['task_run_name'] = task_run_name
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['run_result'] = None
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:containerregistry:TaskRun")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(TaskRun, __self__).__init__(
'azure-nextgen:containerregistry/v20190601preview:TaskRun',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'TaskRun':
"""
Get an existing TaskRun resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return TaskRun(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="forceUpdateTag")
def force_update_tag(self) -> pulumi.Output[Optional[str]]:
"""
How the run should be forced to rerun even if the run request configuration has not changed
"""
return pulumi.get(self, "force_update_tag")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.IdentityPropertiesResponse']]:
"""
Identity for the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The location of the resource
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of this task run
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="runRequest")
def run_request(self) -> pulumi.Output[Optional[Any]]:
"""
The request (parameters) for the run
"""
return pulumi.get(self, "run_request")
@property
@pulumi.getter(name="runResult")
def run_result(self) -> pulumi.Output['outputs.RunResponse']:
"""
The result of this task run
"""
return pulumi.get(self, "run_result")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
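# Hedged usage sketch, shown commented out: creating the resource needs a
# running Pulumi program plus Azure credentials, a real deployment would also
# pass one of the run_request input types, and every literal below is a
# made-up example value.
#
#   task_run = TaskRun(
#       "example-task-run",
#       registry_name="myregistry",
#       resource_group_name="my-resource-group",
#       task_run_name="example-run",
#       location="westeurope",
#   )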
| 41.873626
| 259
| 0.647553
|
4a1917cac9612f9b1fcc96f6090bd28f7e3cd29c
| 2,186
|
py
|
Python
|
src/smokeyfeet/mollie_webhook/views.py
|
smokeyfeet/smokeyfeet-registration
|
6bd8e6bbab8cdbd678a9ec8fe66a1bb036479240
|
[
"MIT"
] | null | null | null |
src/smokeyfeet/mollie_webhook/views.py
|
smokeyfeet/smokeyfeet-registration
|
6bd8e6bbab8cdbd678a9ec8fe66a1bb036479240
|
[
"MIT"
] | 5
|
2020-06-02T09:36:10.000Z
|
2021-06-10T20:05:03.000Z
|
src/smokeyfeet/mollie_webhook/views.py
|
smokeyfeet/smokeyfeet-registration
|
6bd8e6bbab8cdbd678a9ec8fe66a1bb036479240
|
[
"MIT"
] | null | null | null |
import logging
from django.conf import settings
from django.http import HttpResponse, HttpResponseServerError
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from mollie.api.client import Client as MollieClient
from mollie.api.error import Error as MollieError
from smokeyfeet.minishop import mollie_handler as minishop_mollie
from smokeyfeet.registration import mollie_handler as registration_mollie
logger = logging.getLogger(__name__)
def _make_mollie_client():
mollie_client = MollieClient()
mollie_client.set_api_key(settings.MOLLIE_API_KEY)
return mollie_client
def retrieve_payment(payment_id):
mollie_client = _make_mollie_client()
try:
payment = mollie_client.payments.get(payment_id)
except MollieError as err:
logger.error("Mollie API call failed: %s", str(err))
return None
else:
return payment
@csrf_exempt
@require_POST
def mollie_notif(request):
"""
Mollie will notify us when a payment status changes. Only the payment id is
passed and we are responsible for retrieving the payment.
"""
# Pull out the Mollie payment id from the notification
mollie_payment_id = request.POST.get("id", "")
if not mollie_payment_id:
logger.warning("Missing payment id in Mollie notif (probably test)")
return HttpResponse(status=200)
logger.info("Got Mollie payment status update: %s", mollie_payment_id)
# Retrieve the Mollie payment
mollie_payment = retrieve_payment(mollie_payment_id)
if mollie_payment is None:
logger.error("Failed to retrieve Mollie payment: %s", mollie_payment_id)
return HttpResponseServerError()
else:
logger.info("Retrieved Mollie payment: %s", str(mollie_payment))
metadata = mollie_payment.get("metadata", {})
if "order_id" in metadata:
minishop_mollie.on_payment_change(mollie_payment)
elif "registration_id" in metadata:
registration_mollie.on_payment_change(mollie_payment)
else:
logger.error("Missing identifier in Mollie payment: %s", str(mollie_payment))
return HttpResponse(status=200)
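# Hedged, standalone sketch (not part of the app): the metadata dispatch used
# in mollie_notif above, exercised with plain dicts standing in for Mollie
# payment objects.
def _route_for_metadata(payment):
    metadata = payment.get("metadata", {})
    if "order_id" in metadata:
        return "minishop"
    elif "registration_id" in metadata:
        return "registration"
    return "unknown"

if __name__ == "__main__":
    assert _route_for_metadata({"metadata": {"order_id": 42}}) == "minishop"
    assert _route_for_metadata({"metadata": {"registration_id": 7}}) == "registration"
    assert _route_for_metadata({"metadata": {}}) == "unknown"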
| 32.147059
| 85
| 0.745197
|
4a1917d7e796bad01c026079b72b5293412150cd
| 827
|
py
|
Python
|
app/app/urls.py
|
MrRob100/recipe-app-api
|
4a9b05ab80b4d1f3d79b2750b2c8aae55aa8c004
|
[
"MIT"
] | null | null | null |
app/app/urls.py
|
MrRob100/recipe-app-api
|
4a9b05ab80b4d1f3d79b2750b2c8aae55aa8c004
|
[
"MIT"
] | null | null | null |
app/app/urls.py
|
MrRob100/recipe-app-api
|
4a9b05ab80b4d1f3d79b2750b2c8aae55aa8c004
|
[
"MIT"
] | null | null | null |
"""app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/user/', include('user.urls')),
]
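# Hedged sketch, commented out because the app is hypothetical: following the
# docstring's "Including another URLconf" recipe, a recipe API would be
# registered the same way as the user API above.
#
#   urlpatterns += [
#       path('api/recipe/', include('recipe.urls')),
#   ]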
| 34.458333
| 77
| 0.706167
|
4a1918d055e823f9aa786122c7902cfb312244b9
| 967
|
py
|
Python
|
tests/test_message_client.py
|
jinlygenius/sparrow_cloud
|
9cc8619aff48f7f439a63dddeb0ec15ca7fc2538
|
[
"MIT"
] | null | null | null |
tests/test_message_client.py
|
jinlygenius/sparrow_cloud
|
9cc8619aff48f7f439a63dddeb0ec15ca7fc2538
|
[
"MIT"
] | null | null | null |
tests/test_message_client.py
|
jinlygenius/sparrow_cloud
|
9cc8619aff48f7f439a63dddeb0ec15ca7fc2538
|
[
"MIT"
] | null | null | null |
import os
import unittest
from unittest import mock
from django.conf import settings
class TestMessage(unittest.TestCase):
"""test message client"""
def setUp(self):
os.environ["SPARROW_TASK_TEST_SVC_HOST"] = "127.0.0.1:8001"
settings.MESSAGE_SENDER_CONF = {
"SERVICE_CONF": {
"ENV_NAME": "SPARROW_TASK_TEST_SVC_HOST",
"VALUE": "sparrow-task-test-svc",
},
"API_PATH": "/api/sparrow_task/producer/send/",
}
@mock.patch('sparrow_cloud.message_service.sender.TaskSender.send_task', return_value={})
def test_send_message(self, mock_send_task):
from sparrow_cloud.message_service.sender import send_task
exchange = 'topic_3'
routing_key = 'order_pay_success'
message_code = 'order_pay_success'
data = send_task(exchange=exchange, routing_key=routing_key, message_code=message_code)
self.assertEqual(data, {})
| 32.233333
| 95
| 0.661841
|
4a191902b6d901032a19dddc4b9a0861032554d5
| 2,481
|
py
|
Python
|
tool/ics.py
|
honmaple/maple-spider
|
b9b6b295114149436974f4fe82f75dc7f2797129
|
[
"MIT"
] | null | null | null |
tool/ics.py
|
honmaple/maple-spider
|
b9b6b295114149436974f4fe82f75dc7f2797129
|
[
"MIT"
] | null | null | null |
tool/ics.py
|
honmaple/maple-spider
|
b9b6b295114149436974f4fe82f75dc7f2797129
|
[
"MIT"
] | 1
|
2019-04-20T03:22:26.000Z
|
2019-04-20T03:22:26.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# **************************************************************************
# Copyright © 2017 jianglin
# File Name: ics.py
# Author: jianglin
# Email: xiyang0807@gmail.com
# Created: 2017-04-12 16:18:05 (CST)
# Last Update: Wednesday 2017-4-12 17:4:29 (CST)
# By:
# Description:
# **************************************************************************
from lxml import html
from random import choice
import requests
import json
class Spider(object):
def __init__(self, start_url):
self.url_prefix = 'http://m.biqugetw.com'
self.start_url = start_url
@property
def headers(self):
'''
        Set the request headers with a randomly chosen User-Agent.
'''
user_agent = [
'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)',
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1"
]
return {'User-Agent': choice(user_agent)}
def request(self, url):
r = requests.get(url, headers=self.headers)
return r.text
    def parse(self, content):
        # Each <tr> on the scraped datasets page holds one dataset entry.
        selector = html.fromstring(content)
        hrefs = selector.xpath('//tr')
        datas = []
        for href in hrefs:
            data = {}
            # The six <p class="normal"> cells carry the dataset attributes in a fixed order.
            text = href.xpath('td/p[@class="normal"]/text()')
            if len(text) == 6:
                data['name'] = href.xpath('td//p/b/a/text()')[0]
                # Strip the non-breaking spaces the page appends to every cell.
                data['data_types'] = text[0].replace('\xa0', '')
                data['default_task'] = text[1].replace('\xa0', '')
                data['attributes_types'] = text[2].replace('\xa0', '')
                data['instances'] = text[3].replace('\xa0', '')
                data['attributes'] = text[4].replace('\xa0', '')
                data['year'] = text[5].replace('\xa0', '')
            if data:
                datas.append(data)
        # Dump the scraped rows to a local JSON file.
        a = json.dumps(datas)
        with open('b.json', 'w') as f:
            f.write(a)
def start(self):
return self.parse(self.request(self.start_url))
if __name__ == '__main__':
spider = Spider("http://archive.ics.uci.edu/ml/datasets.html")
spider.start()
| 35.442857
| 130
| 0.520355
|
4a1919752abfaeaf89bcadd30b21c1abdeb47ada
| 1,193
|
py
|
Python
|
help/setter/admin_setter.py
|
relarizky/flask-pre-order
|
d336c179414892a8fad4bdbe63c8cb5449921729
|
[
"MIT"
] | null | null | null |
help/setter/admin_setter.py
|
relarizky/flask-pre-order
|
d336c179414892a8fad4bdbe63c8cb5449921729
|
[
"MIT"
] | null | null | null |
help/setter/admin_setter.py
|
relarizky/flask-pre-order
|
d336c179414892a8fad4bdbe63c8cb5449921729
|
[
"MIT"
] | null | null | null |
# Author : Relarizky
# Github : https://github.com/relarizky
# File Name : help/setter/admin_setter.py
# Last Modified : 02/02/21, 11:34 PM
# Copyright © Relarizky 2021
from help.hash import create_sha224
from help.exception import ValueLengthError
class AdminSetter:
"""
contains setter for Admin (tb_admin) with validation
"""
def set_real_name(self, real_name: str) -> None:
"""
set real name
"""
        if len(real_name) > 40:
raise ValueLengthError("real name tidak boleh lebih dari 40")
self.real_name = real_name
def set_user_name(self, user_name: str) -> None:
"""
set user name
"""
        if len(user_name) > 20:
raise ValueLengthError("user name tidak boleh lebih dari 20")
self.user_name = user_name
def set_pass_word(self, pass_word: str) -> None:
"""
set pass word
"""
        if pass_word:
            # pass_word is not empty
            if len(pass_word) < 8:
raise ValueLengthError("pass word harus lebih dari 8")
self.pass_word = create_sha224(pass_word)
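# Hedged usage sketch: exercising the validators above with made-up values;
# running it still requires the project's help.* packages imported at the top
# of this file.
if __name__ == "__main__":
    admin = AdminSetter()
    admin.set_user_name("relarizky")        # well under the 20-character limit
    admin.set_pass_word("a-long-password")  # at least 8 characters, stored as SHA-224
    try:
        admin.set_user_name("x" * 21)       # 21 > 20 characters
    except ValueLengthError as exc:
        print(exc)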
| 24.854167
| 73
| 0.602682
|
4a1919bcafdb5b292fbfc9ae7c017dc60b593497
| 17,688
|
py
|
Python
|
HLTrigger/Configuration/test/OnLine_HLT_Fake2.py
|
vjmastra/cmssw
|
de96df37dbaf3543daef67339179e074bde9e858
|
[
"Apache-2.0"
] | 1
|
2018-08-28T16:51:36.000Z
|
2018-08-28T16:51:36.000Z
|
HLTrigger/Configuration/test/OnLine_HLT_Fake2.py
|
dellaric/cmssw
|
cd7470dc554972076740dde7523f311c43f248d3
|
[
"Apache-2.0"
] | 25
|
2016-06-24T20:55:32.000Z
|
2022-02-01T19:24:45.000Z
|
HLTrigger/Configuration/test/OnLine_HLT_Fake2.py
|
dellaric/cmssw
|
cd7470dc554972076740dde7523f311c43f248d3
|
[
"Apache-2.0"
] | 8
|
2016-03-25T07:17:43.000Z
|
2021-07-08T17:11:21.000Z
|
# hltGetConfiguration --full --data /dev/CMSSW_12_3_0/Fake2 --type Fake2 --unprescale --process HLTFake2 --globaltag auto:run2_hlt_Fake2 --input file:RelVal_Raw_Fake2_DATA.root
# /dev/CMSSW_12_3_0/Fake2/V3 (CMSSW_12_3_0_pre2)
import FWCore.ParameterSet.Config as cms
process = cms.Process( "HLTFake2" )
process.HLTConfigVersion = cms.PSet(
tableName = cms.string('/dev/CMSSW_12_3_0/Fake2/V3')
)
process.streams = cms.PSet( A = cms.vstring( 'InitialPD' ) )
process.datasets = cms.PSet( InitialPD = cms.vstring( 'HLT_Physics_v1',
'HLT_Random_v1',
'HLT_ZeroBias_v1' ) )
process.GlobalParametersRcdSource = cms.ESSource( "EmptyESSource",
recordName = cms.string( "L1TGlobalParametersRcd" ),
iovIsRunNotTime = cms.bool( True ),
firstValid = cms.vuint32( 1 )
)
process.GlobalTag = cms.ESSource( "PoolDBESSource",
DBParameters = cms.PSet(
authenticationPath = cms.untracked.string( "." ),
connectionRetrialTimeOut = cms.untracked.int32( 60 ),
idleConnectionCleanupPeriod = cms.untracked.int32( 10 ),
messageLevel = cms.untracked.int32( 0 ),
enablePoolAutomaticCleanUp = cms.untracked.bool( False ),
enableConnectionSharing = cms.untracked.bool( True ),
enableReadOnlySessionOnUpdateConnection = cms.untracked.bool( False ),
connectionTimeOut = cms.untracked.int32( 0 ),
connectionRetrialPeriod = cms.untracked.int32( 10 )
),
connect = cms.string( "frontier://FrontierProd/CMS_CONDITIONS" ),
globaltag = cms.string( "80X_dataRun2_HLT_v12" ),
snapshotTime = cms.string( "" ),
toGet = cms.VPSet(
),
DumpStat = cms.untracked.bool( False ),
ReconnectEachRun = cms.untracked.bool( False ),
RefreshAlways = cms.untracked.bool( False ),
RefreshEachRun = cms.untracked.bool( False ),
RefreshOpenIOVs = cms.untracked.bool( False )
)
process.GlobalParameters = cms.ESProducer( "StableParametersTrivialProducer",
TotalBxInEvent = cms.int32( 5 ),
NumberPhysTriggers = cms.uint32( 512 ),
NumberL1Muon = cms.uint32( 12 ),
NumberL1EGamma = cms.uint32( 12 ),
NumberL1Jet = cms.uint32( 12 ),
NumberL1Tau = cms.uint32( 8 ),
NumberChips = cms.uint32( 5 ),
PinsOnChip = cms.uint32( 512 ),
OrderOfChip = cms.vint32( 1 ),
NumberL1IsoEG = cms.uint32( 4 ),
NumberL1JetCounts = cms.uint32( 12 ),
UnitLength = cms.int32( 8 ),
NumberL1ForJet = cms.uint32( 4 ),
IfCaloEtaNumberBits = cms.uint32( 4 ),
IfMuEtaNumberBits = cms.uint32( 6 ),
NumberL1TauJet = cms.uint32( 4 ),
NumberL1Mu = cms.uint32( 4 ),
NumberConditionChips = cms.uint32( 1 ),
NumberPsbBoards = cms.int32( 7 ),
NumberL1CenJet = cms.uint32( 4 ),
PinsOnConditionChip = cms.uint32( 512 ),
NumberL1NoIsoEG = cms.uint32( 4 ),
NumberTechnicalTriggers = cms.uint32( 64 ),
NumberPhysTriggersExtended = cms.uint32( 64 ),
WordLength = cms.int32( 64 ),
OrderConditionChip = cms.vint32( 1 ),
appendToDataLabel = cms.string( "" )
)
process.CastorDbProducer = cms.ESProducer( "CastorDbProducer",
appendToDataLabel = cms.string( "" )
)
process.HcalTopologyIdealEP = cms.ESProducer( "HcalTopologyIdealEP",
Exclude = cms.untracked.string( "" ),
MergePosition = cms.untracked.bool( True ),
appendToDataLabel = cms.string( "" )
)
process.hcalDDDRecConstants = cms.ESProducer( "HcalDDDRecConstantsESModule",
appendToDataLabel = cms.string( "" )
)
process.hcalDDDSimConstants = cms.ESProducer( "HcalDDDSimConstantsESModule",
appendToDataLabel = cms.string( "" )
)
process.FastTimerService = cms.Service( "FastTimerService",
printEventSummary = cms.untracked.bool( False ),
printRunSummary = cms.untracked.bool( True ),
printJobSummary = cms.untracked.bool( True ),
writeJSONSummary = cms.untracked.bool( False ),
jsonFileName = cms.untracked.string( "resources.json" ),
enableDQM = cms.untracked.bool( True ),
enableDQMbyModule = cms.untracked.bool( False ),
enableDQMbyPath = cms.untracked.bool( False ),
enableDQMbyLumiSection = cms.untracked.bool( True ),
enableDQMbyProcesses = cms.untracked.bool( True ),
enableDQMTransitions = cms.untracked.bool( False ),
dqmTimeRange = cms.untracked.double( 1000.0 ),
dqmTimeResolution = cms.untracked.double( 5.0 ),
dqmMemoryRange = cms.untracked.double( 1000000.0 ),
dqmMemoryResolution = cms.untracked.double( 5000.0 ),
dqmPathTimeRange = cms.untracked.double( 100.0 ),
dqmPathTimeResolution = cms.untracked.double( 0.5 ),
dqmPathMemoryRange = cms.untracked.double( 1000000.0 ),
dqmPathMemoryResolution = cms.untracked.double( 5000.0 ),
dqmModuleTimeRange = cms.untracked.double( 40.0 ),
dqmModuleTimeResolution = cms.untracked.double( 0.2 ),
dqmModuleMemoryRange = cms.untracked.double( 100000.0 ),
dqmModuleMemoryResolution = cms.untracked.double( 500.0 ),
dqmLumiSectionsRange = cms.untracked.uint32( 2500 ),
dqmPath = cms.untracked.string( "HLT/TimerService" ),
)
process.MessageLogger = cms.Service( "MessageLogger",
suppressWarning = cms.untracked.vstring( 'hltOnlineBeamSpot',
'hltCtf3HitL1SeededWithMaterialTracks',
'hltL3MuonsOIState',
'hltPixelTracksForHighMult',
'hltHITPixelTracksHE',
'hltHITPixelTracksHB',
'hltCtfL1SeededWithMaterialTracks',
'hltRegionalTracksForL3MuonIsolation',
'hltSiPixelClusters',
'hltActivityStartUpElectronPixelSeeds',
'hltLightPFTracks',
'hltPixelVertices3DbbPhi',
'hltL3MuonsIOHit',
'hltPixelTracks',
'hltSiPixelDigis',
'hltL3MuonsOIHit',
'hltL1SeededElectronGsfTracks',
'hltL1SeededStartUpElectronPixelSeeds',
'hltBLifetimeRegionalCtfWithMaterialTracksbbPhiL1FastJetFastPV',
'hltCtfActivityWithMaterialTracks' ),
suppressFwkInfo = cms.untracked.vstring( ),
suppressInfo = cms.untracked.vstring( ),
suppressDebug = cms.untracked.vstring( ),
debugModules = cms.untracked.vstring( ),
cerr = cms.untracked.PSet(
INFO = cms.untracked.PSet( limit = cms.untracked.int32( 0 ) ),
noTimeStamps = cms.untracked.bool( False ),
FwkReport = cms.untracked.PSet(
reportEvery = cms.untracked.int32( 1 ),
limit = cms.untracked.int32( 0 )
),
default = cms.untracked.PSet( limit = cms.untracked.int32( 10000000 ) ),
Root_NoDictionary = cms.untracked.PSet( limit = cms.untracked.int32( 0 ) ),
FwkJob = cms.untracked.PSet( limit = cms.untracked.int32( 0 ) ),
FwkSummary = cms.untracked.PSet(
reportEvery = cms.untracked.int32( 1 ),
limit = cms.untracked.int32( 10000000 )
),
threshold = cms.untracked.string( "INFO" ),
),
suppressError = cms.untracked.vstring( 'hltOnlineBeamSpot',
'hltL3MuonCandidates',
'hltL3TkTracksFromL2OIState',
'hltPFJetCtfWithMaterialTracks',
'hltL3TkTracksFromL2IOHit',
'hltL3TkTracksFromL2OIHit' )
)
process.hltGetConditions = cms.EDAnalyzer( "EventSetupRecordDataGetter",
verbose = cms.untracked.bool( False ),
toGet = cms.VPSet(
)
)
process.hltGetRaw = cms.EDAnalyzer( "HLTGetRaw",
RawDataCollection = cms.InputTag( "rawDataCollector" )
)
process.hltBoolFalse = cms.EDFilter( "HLTBool",
result = cms.bool( False )
)
process.hltTriggerType = cms.EDFilter( "HLTTriggerTypeFilter",
SelectedTriggerType = cms.int32( 1 )
)
process.hltGtStage2Digis = cms.EDProducer( "L1TRawToDigi",
FedIds = cms.vint32( 1404 ),
Setup = cms.string( "stage2::GTSetup" ),
FWId = cms.uint32( 0 ),
DmxFWId = cms.uint32( 0 ),
FWOverride = cms.bool( False ),
TMTCheck = cms.bool( True ),
CTP7 = cms.untracked.bool( False ),
MTF7 = cms.untracked.bool( False ),
InputLabel = cms.InputTag( "rawDataCollector" ),
lenSlinkHeader = cms.untracked.int32( 8 ),
lenSlinkTrailer = cms.untracked.int32( 8 ),
lenAMCHeader = cms.untracked.int32( 8 ),
lenAMCTrailer = cms.untracked.int32( 0 ),
lenAMC13Header = cms.untracked.int32( 8 ),
lenAMC13Trailer = cms.untracked.int32( 8 ),
debug = cms.untracked.bool( False ),
MinFeds = cms.uint32( 0 )
)
process.hltGtStage2ObjectMap = cms.EDProducer( "L1TGlobalProducer",
MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
MuonShowerInputTag = cms.InputTag( "" ),
EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' ),
ExtInputTag = cms.InputTag( "hltGtStage2Digis" ),
AlgoBlkInputTag = cms.InputTag( "hltGtStage2Digis" ),
GetPrescaleColumnFromData = cms.bool( False ),
AlgorithmTriggersUnprescaled = cms.bool( True ),
RequireMenuToMatchAlgoBlkInput = cms.bool( True ),
AlgorithmTriggersUnmasked = cms.bool( True ),
useMuonShowers = cms.bool( False ),
ProduceL1GtDaqRecord = cms.bool( True ),
ProduceL1GtObjectMapRecord = cms.bool( True ),
EmulateBxInEvent = cms.int32( 1 ),
L1DataBxInEvent = cms.int32( 5 ),
AlternativeNrBxBoardDaq = cms.uint32( 0 ),
BstLengthBytes = cms.int32( -1 ),
PrescaleSet = cms.uint32( 1 ),
Verbosity = cms.untracked.int32( 0 ),
PrintL1Menu = cms.untracked.bool( False ),
TriggerMenuLuminosity = cms.string( "startup" ),
PrescaleCSVFile = cms.string( "prescale_L1TGlobal.csv" )
)
process.hltScalersRawToDigi = cms.EDProducer( "ScalersRawToDigi",
scalersInputTag = cms.InputTag( "rawDataCollector" )
)
process.hltOnlineBeamSpot = cms.EDProducer( "BeamSpotOnlineProducer",
changeToCMSCoordinates = cms.bool( False ),
maxZ = cms.double( 40.0 ),
setSigmaZ = cms.double( 0.0 ),
beamMode = cms.untracked.uint32( 11 ),
src = cms.InputTag( "hltScalersRawToDigi" ),
gtEvmLabel = cms.InputTag( "" ),
maxRadius = cms.double( 2.0 ),
useTransientRecord = cms.bool( False )
)
process.hltPrePhysics = cms.EDFilter( "HLTPrescaler",
offset = cms.uint32( 0 ),
L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltBoolEnd = cms.EDFilter( "HLTBool",
result = cms.bool( True )
)
process.hltRandomEventsFilter = cms.EDFilter( "HLTTriggerTypeFilter",
SelectedTriggerType = cms.int32( 3 )
)
process.hltPreRandom = cms.EDFilter( "HLTPrescaler",
offset = cms.uint32( 0 ),
L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltL1sZeroBias = cms.EDFilter( "HLTL1TSeed",
saveTags = cms.bool( True ),
L1SeedsLogicalExpression = cms.string( "L1_ZeroBias" ),
L1ObjectMapInputTag = cms.InputTag( "hltGtStage2ObjectMap" ),
L1GlobalInputTag = cms.InputTag( "hltGtStage2Digis" ),
L1MuonInputTag = cms.InputTag( 'hltGtStage2Digis','Muon' ),
L1EGammaInputTag = cms.InputTag( 'hltGtStage2Digis','EGamma' ),
L1JetInputTag = cms.InputTag( 'hltGtStage2Digis','Jet' ),
L1TauInputTag = cms.InputTag( 'hltGtStage2Digis','Tau' ),
L1EtSumInputTag = cms.InputTag( 'hltGtStage2Digis','EtSum' )
)
process.hltPreZeroBias = cms.EDFilter( "HLTPrescaler",
offset = cms.uint32( 0 ),
L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltFEDSelector = cms.EDProducer( "EvFFEDSelector",
inputTag = cms.InputTag( "rawDataCollector" ),
fedList = cms.vuint32( 1023, 1024 )
)
process.hltTriggerSummaryAOD = cms.EDProducer( "TriggerSummaryProducerAOD",
throw = cms.bool( False ),
processName = cms.string( "@" ),
moduleLabelPatternsToMatch = cms.vstring( 'hlt*' ),
moduleLabelPatternsToSkip = cms.vstring( )
)
process.hltTriggerSummaryRAW = cms.EDProducer( "TriggerSummaryProducerRAW",
processName = cms.string( "@" )
)
process.hltPreHLTAnalyzerEndpath = cms.EDFilter( "HLTPrescaler",
offset = cms.uint32( 0 ),
L1GtReadoutRecordTag = cms.InputTag( "hltGtStage2Digis" )
)
process.hltL1TGlobalSummary = cms.EDAnalyzer( "L1TGlobalSummary",
AlgInputTag = cms.InputTag( "hltGtStage2Digis" ),
ExtInputTag = cms.InputTag( "hltGtStage2Digis" ),
MinBx = cms.int32( 0 ),
MaxBx = cms.int32( 0 ),
DumpTrigResults = cms.bool( False ),
DumpRecord = cms.bool( False ),
DumpTrigSummary = cms.bool( True ),
ReadPrescalesFromFile = cms.bool( False ),
psFileName = cms.string( "prescale_L1TGlobal.csv" ),
psColumn = cms.int32( 0 )
)
process.hltTrigReport = cms.EDAnalyzer( "HLTrigReport",
HLTriggerResults = cms.InputTag( 'TriggerResults','','@currentProcess' ),
reportBy = cms.untracked.string( "job" ),
resetBy = cms.untracked.string( "never" ),
serviceBy = cms.untracked.string( "never" ),
ReferencePath = cms.untracked.string( "HLTriggerFinalPath" ),
ReferenceRate = cms.untracked.double( 100.0 )
)
process.hltPreAOutput = cms.EDFilter( "HLTPrescaler",
offset = cms.uint32( 0 ),
L1GtReadoutRecordTag = cms.InputTag( "hltGtDigis" )
)
process.hltOutputA = cms.OutputModule( "PoolOutputModule",
fileName = cms.untracked.string( "outputA.root" ),
fastCloning = cms.untracked.bool( False ),
dataset = cms.untracked.PSet(
filterName = cms.untracked.string( "" ),
dataTier = cms.untracked.string( "RAW" )
),
SelectEvents = cms.untracked.PSet( SelectEvents = cms.vstring( 'HLT_Physics_v1',
'HLT_Random_v1',
'HLT_ZeroBias_v1' ) ),
outputCommands = cms.untracked.vstring( 'drop *',
'keep *_hltL1GtObjectMap_*_*',
'keep FEDRawDataCollection_rawDataCollector_*_*',
'keep FEDRawDataCollection_source_*_*',
'keep edmTriggerResults_*_*_*',
'keep triggerTriggerEvent_*_*_*' )
)
process.HLTL1UnpackerSequence = cms.Sequence( process.hltGtStage2Digis + process.hltGtStage2ObjectMap )
process.HLTBeamSpot = cms.Sequence( process.hltScalersRawToDigi + process.hltOnlineBeamSpot )
process.HLTBeginSequence = cms.Sequence( process.hltTriggerType + process.HLTL1UnpackerSequence + process.HLTBeamSpot )
process.HLTEndSequence = cms.Sequence( process.hltBoolEnd )
process.HLTBeginSequenceRandom = cms.Sequence( process.hltRandomEventsFilter + process.hltGtStage2Digis )
process.HLTriggerFirstPath = cms.Path( process.hltGetConditions + process.hltGetRaw + process.hltBoolFalse )
process.HLT_Physics_v1 = cms.Path( process.HLTBeginSequence + process.hltPrePhysics + process.HLTEndSequence )
process.HLT_Random_v1 = cms.Path( process.HLTBeginSequenceRandom + process.hltPreRandom + process.HLTEndSequence )
process.HLT_ZeroBias_v1 = cms.Path( process.HLTBeginSequence + process.hltL1sZeroBias + process.hltPreZeroBias + process.HLTEndSequence )
process.HLTriggerFinalPath = cms.Path( process.hltGtStage2Digis + process.hltScalersRawToDigi + process.hltFEDSelector + process.hltTriggerSummaryAOD + process.hltTriggerSummaryRAW + process.hltBoolFalse )
process.HLTAnalyzerEndpath = cms.EndPath( process.hltGtStage2Digis + process.hltPreHLTAnalyzerEndpath + process.hltL1TGlobalSummary + process.hltTrigReport )
process.AOutput = cms.EndPath( process.hltPreAOutput + process.hltOutputA )
# load the DQMStore and DQMRootOutputModule
process.load( "DQMServices.Core.DQMStore_cfi" )
process.dqmOutput = cms.OutputModule("DQMRootOutputModule",
fileName = cms.untracked.string("DQMIO.root")
)
process.DQMOutput = cms.EndPath( process.dqmOutput )
process.schedule = cms.Schedule( *(process.HLTriggerFirstPath, process.HLT_Physics_v1, process.HLT_Random_v1, process.HLT_ZeroBias_v1, process.HLTriggerFinalPath, process.HLTAnalyzerEndpath, process.AOutput, process.DQMOutput, ))
# source module (EDM inputs)
process.source = cms.Source( "PoolSource",
fileNames = cms.untracked.vstring(
'file:RelVal_Raw_Fake2_DATA.root',
),
inputCommands = cms.untracked.vstring(
'keep *'
)
)
# limit the number of events to be processed
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32( 100 )
)
# enable TrigReport, TimeReport and MultiThreading
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool( True ),
numberOfThreads = cms.untracked.uint32( 4 ),
numberOfStreams = cms.untracked.uint32( 0 ),
)
# override the GlobalTag, connection string and pfnPrefix
if 'GlobalTag' in process.__dict__:
from Configuration.AlCa.GlobalTag import GlobalTag as customiseGlobalTag
process.GlobalTag = customiseGlobalTag(process.GlobalTag, globaltag = 'auto:run2_hlt_Fake2')
# show summaries from trigger analysers used at HLT
if 'MessageLogger' in process.__dict__:
process.MessageLogger.TriggerSummaryProducerAOD = cms.untracked.PSet()
process.MessageLogger.L1GtTrigReport = cms.untracked.PSet()
process.MessageLogger.L1TGlobalSummary = cms.untracked.PSet()
process.MessageLogger.HLTrigReport = cms.untracked.PSet()
process.MessageLogger.FastReport = cms.untracked.PSet()
process.MessageLogger.ThroughputService = cms.untracked.PSet()
# add specific customizations
_customInfo = {}
_customInfo['menuType' ]= "Fake2"
_customInfo['globalTags']= {}
_customInfo['globalTags'][True ] = "auto:run2_hlt_Fake2"
_customInfo['globalTags'][False] = "auto:run2_mc_Fake2"
_customInfo['inputFiles']={}
_customInfo['inputFiles'][True] = "file:RelVal_Raw_Fake2_DATA.root"
_customInfo['inputFiles'][False] = "file:RelVal_Raw_Fake2_MC.root"
_customInfo['maxEvents' ]= 100
_customInfo['globalTag' ]= "auto:run2_hlt_Fake2"
_customInfo['inputFile' ]= ['file:RelVal_Raw_Fake2_DATA.root']
_customInfo['realData' ]= True
from HLTrigger.Configuration.customizeHLTforALL import customizeHLTforAll
process = customizeHLTforAll(process,"Fake2",_customInfo)
from HLTrigger.Configuration.customizeHLTforCMSSW import customizeHLTforCMSSW
process = customizeHLTforCMSSW(process,"Fake2")
# Eras-based customisations
from HLTrigger.Configuration.Eras import modifyHLTforEras
modifyHLTforEras(process)
| 42.519231
| 229
| 0.722185
|
4a1919f12c5effb37031c673a2db65c11940f84f
| 271
|
py
|
Python
|
pyparcel/__version__.py
|
najaco/PyParcel
|
47f2aa4fb64c8d866b630262928bd2715956bf07
|
[
"MIT"
] | 1
|
2020-05-22T18:28:36.000Z
|
2020-05-22T18:28:36.000Z
|
pyparcel/__version__.py
|
najaco/PyParcel
|
47f2aa4fb64c8d866b630262928bd2715956bf07
|
[
"MIT"
] | 15
|
2020-05-18T16:15:03.000Z
|
2021-05-03T19:33:48.000Z
|
pyparcel/__version__.py
|
najaco/PyParcel
|
47f2aa4fb64c8d866b630262928bd2715956bf07
|
[
"MIT"
] | 2
|
2020-05-17T21:25:30.000Z
|
2020-05-18T22:54:23.000Z
|
__title__ = "pyparcel"
__description__ = "The simple and secure way to convert objects to bytestrings."
__url__ = "https://najaco.github.io/pyparcel/"
__version__ = "1.0.0"
__author__ = "Nathan Cohen"
__author_email__ = "ncohen4299@gmail.com"
__license__ = "MIT License"
| 33.875
| 80
| 0.760148
|
4a191ab1ed4e38d7a8d485384eddca30737aacaf
| 549
|
py
|
Python
|
output/models/nist_data/atomic/non_negative_integer/schema_instance/nistschema_sv_iv_atomic_non_negative_integer_max_exclusive_1_xsd/nistschema_sv_iv_atomic_non_negative_integer_max_exclusive_1.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/atomic/non_negative_integer/schema_instance/nistschema_sv_iv_atomic_non_negative_integer_max_exclusive_1_xsd/nistschema_sv_iv_atomic_non_negative_integer_max_exclusive_1.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/atomic/non_negative_integer/schema_instance/nistschema_sv_iv_atomic_non_negative_integer_max_exclusive_1_xsd/nistschema_sv_iv_atomic_non_negative_integer_max_exclusive_1.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
from typing import Optional
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-nonNegativeInteger-maxExclusive-1-NS"
@dataclass
class NistschemaSvIvAtomicNonNegativeIntegerMaxExclusive1:
class Meta:
name = "NISTSchema-SV-IV-atomic-nonNegativeInteger-maxExclusive-1"
namespace = "NISTSchema-SV-IV-atomic-nonNegativeInteger-maxExclusive-1-NS"
value: Optional[int] = field(
default=None,
metadata={
"required": True,
"max_exclusive": 1,
}
)
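# Hedged usage sketch: instantiating the generated binding directly with an
# arbitrary example value. The max_exclusive facet is metadata consumed by
# xsdata's (de)serialization layer, not something the plain dataclass
# constructor enforces.
if __name__ == "__main__":
    obj = NistschemaSvIvAtomicNonNegativeIntegerMaxExclusive1(value=0)
    assert obj.value == 0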
| 27.45
| 82
| 0.695811
|
4a191bc965ce800239b432eb53719005c51116ad
| 9,041
|
py
|
Python
|
rootfs/usr/lib/python3/dist-packages/serial/win32.py
|
kappaIO-Dev/kappaIO-sdk-armhf-crosscompile
|
66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2
|
[
"MIT"
] | null | null | null |
rootfs/usr/lib/python3/dist-packages/serial/win32.py
|
kappaIO-Dev/kappaIO-sdk-armhf-crosscompile
|
66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2
|
[
"MIT"
] | 1
|
2018-04-15T22:59:15.000Z
|
2018-04-15T22:59:15.000Z
|
rootfs/usr/lib/python3/dist-packages/serial/win32.py
|
kappaIO-Dev/kappaIO-sdk-armhf-crosscompile
|
66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2
|
[
"MIT"
] | null | null | null |
from ctypes import *
from ctypes.wintypes import HANDLE
from ctypes.wintypes import BOOL
from ctypes.wintypes import LPCWSTR
_stdcall_libraries = {}
_stdcall_libraries['kernel32'] = WinDLL('kernel32')
from ctypes.wintypes import DWORD
from ctypes.wintypes import WORD
from ctypes.wintypes import BYTE
INVALID_HANDLE_VALUE = HANDLE(-1).value
class _SECURITY_ATTRIBUTES(Structure):
pass
LPSECURITY_ATTRIBUTES = POINTER(_SECURITY_ATTRIBUTES)
CreateEventW = _stdcall_libraries['kernel32'].CreateEventW
CreateEventW.restype = HANDLE
CreateEventW.argtypes = [LPSECURITY_ATTRIBUTES, BOOL, BOOL, LPCWSTR]
CreateEvent = CreateEventW # alias
CreateFileW = _stdcall_libraries['kernel32'].CreateFileW
CreateFileW.restype = HANDLE
CreateFileW.argtypes = [LPCWSTR, DWORD, DWORD, LPSECURITY_ATTRIBUTES, DWORD, DWORD, HANDLE]
CreateFile = CreateFileW # alias
class _OVERLAPPED(Structure):
pass
OVERLAPPED = _OVERLAPPED
class _COMSTAT(Structure):
pass
COMSTAT = _COMSTAT
class _DCB(Structure):
pass
DCB = _DCB
class _COMMTIMEOUTS(Structure):
pass
COMMTIMEOUTS = _COMMTIMEOUTS
GetLastError = _stdcall_libraries['kernel32'].GetLastError
GetLastError.restype = DWORD
GetLastError.argtypes = []
LPOVERLAPPED = POINTER(_OVERLAPPED)
LPDWORD = POINTER(DWORD)
GetOverlappedResult = _stdcall_libraries['kernel32'].GetOverlappedResult
GetOverlappedResult.restype = BOOL
GetOverlappedResult.argtypes = [HANDLE, LPOVERLAPPED, LPDWORD, BOOL]
ResetEvent = _stdcall_libraries['kernel32'].ResetEvent
ResetEvent.restype = BOOL
ResetEvent.argtypes = [HANDLE]
LPCVOID = c_void_p
WriteFile = _stdcall_libraries['kernel32'].WriteFile
WriteFile.restype = BOOL
WriteFile.argtypes = [HANDLE, LPCVOID, DWORD, LPDWORD, LPOVERLAPPED]
LPVOID = c_void_p
ReadFile = _stdcall_libraries['kernel32'].ReadFile
ReadFile.restype = BOOL
ReadFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPOVERLAPPED]
CloseHandle = _stdcall_libraries['kernel32'].CloseHandle
CloseHandle.restype = BOOL
CloseHandle.argtypes = [HANDLE]
ClearCommBreak = _stdcall_libraries['kernel32'].ClearCommBreak
ClearCommBreak.restype = BOOL
ClearCommBreak.argtypes = [HANDLE]
LPCOMSTAT = POINTER(_COMSTAT)
ClearCommError = _stdcall_libraries['kernel32'].ClearCommError
ClearCommError.restype = BOOL
ClearCommError.argtypes = [HANDLE, LPDWORD, LPCOMSTAT]
SetupComm = _stdcall_libraries['kernel32'].SetupComm
SetupComm.restype = BOOL
SetupComm.argtypes = [HANDLE, DWORD, DWORD]
EscapeCommFunction = _stdcall_libraries['kernel32'].EscapeCommFunction
EscapeCommFunction.restype = BOOL
EscapeCommFunction.argtypes = [HANDLE, DWORD]
GetCommModemStatus = _stdcall_libraries['kernel32'].GetCommModemStatus
GetCommModemStatus.restype = BOOL
GetCommModemStatus.argtypes = [HANDLE, LPDWORD]
LPDCB = POINTER(_DCB)
GetCommState = _stdcall_libraries['kernel32'].GetCommState
GetCommState.restype = BOOL
GetCommState.argtypes = [HANDLE, LPDCB]
LPCOMMTIMEOUTS = POINTER(_COMMTIMEOUTS)
GetCommTimeouts = _stdcall_libraries['kernel32'].GetCommTimeouts
GetCommTimeouts.restype = BOOL
GetCommTimeouts.argtypes = [HANDLE, LPCOMMTIMEOUTS]
PurgeComm = _stdcall_libraries['kernel32'].PurgeComm
PurgeComm.restype = BOOL
PurgeComm.argtypes = [HANDLE, DWORD]
SetCommBreak = _stdcall_libraries['kernel32'].SetCommBreak
SetCommBreak.restype = BOOL
SetCommBreak.argtypes = [HANDLE]
SetCommMask = _stdcall_libraries['kernel32'].SetCommMask
SetCommMask.restype = BOOL
SetCommMask.argtypes = [HANDLE, DWORD]
SetCommState = _stdcall_libraries['kernel32'].SetCommState
SetCommState.restype = BOOL
SetCommState.argtypes = [HANDLE, LPDCB]
SetCommTimeouts = _stdcall_libraries['kernel32'].SetCommTimeouts
SetCommTimeouts.restype = BOOL
SetCommTimeouts.argtypes = [HANDLE, LPCOMMTIMEOUTS]
WaitForSingleObject = _stdcall_libraries['kernel32'].WaitForSingleObject
WaitForSingleObject.restype = DWORD
WaitForSingleObject.argtypes = [HANDLE, DWORD]
ONESTOPBIT = 0 # Variable c_int
TWOSTOPBITS = 2 # Variable c_int
ONE5STOPBITS = 1
NOPARITY = 0 # Variable c_int
ODDPARITY = 1 # Variable c_int
EVENPARITY = 2 # Variable c_int
MARKPARITY = 3
SPACEPARITY = 4
RTS_CONTROL_HANDSHAKE = 2 # Variable c_int
RTS_CONTROL_DISABLE = 0 # Variable c_int
RTS_CONTROL_ENABLE = 1 # Variable c_int
SETRTS = 3
CLRRTS = 4
DTR_CONTROL_HANDSHAKE = 2 # Variable c_int
DTR_CONTROL_DISABLE = 0 # Variable c_int
DTR_CONTROL_ENABLE = 1 # Variable c_int
SETDTR = 5
CLRDTR = 6
MS_DSR_ON = 32 # Variable c_ulong
EV_RING = 256 # Variable c_int
EV_PERR = 512 # Variable c_int
EV_ERR = 128 # Variable c_int
SETXOFF = 1 # Variable c_int
EV_RXCHAR = 1 # Variable c_int
GENERIC_WRITE = 1073741824 # Variable c_long
PURGE_TXCLEAR = 4 # Variable c_int
FILE_FLAG_OVERLAPPED = 1073741824 # Variable c_int
EV_DSR = 16 # Variable c_int
MAXDWORD = 4294967295 # Variable c_uint
EV_RLSD = 32 # Variable c_int
ERROR_IO_PENDING = 997 # Variable c_long
MS_CTS_ON = 16 # Variable c_ulong
EV_EVENT1 = 2048 # Variable c_int
EV_RX80FULL = 1024 # Variable c_int
PURGE_RXABORT = 2 # Variable c_int
FILE_ATTRIBUTE_NORMAL = 128 # Variable c_int
PURGE_TXABORT = 1 # Variable c_int
SETXON = 2 # Variable c_int
OPEN_EXISTING = 3 # Variable c_int
MS_RING_ON = 64 # Variable c_ulong
EV_TXEMPTY = 4 # Variable c_int
EV_RXFLAG = 2 # Variable c_int
MS_RLSD_ON = 128 # Variable c_ulong
GENERIC_READ = 2147483648 # Variable c_ulong
EV_EVENT2 = 4096 # Variable c_int
EV_CTS = 8 # Variable c_int
EV_BREAK = 64 # Variable c_int
PURGE_RXCLEAR = 8 # Variable c_int
ULONG_PTR = c_ulong
INFINITE = 0xFFFFFFFF
class N11_OVERLAPPED4DOLLAR_48E(Union):
pass
class N11_OVERLAPPED4DOLLAR_484DOLLAR_49E(Structure):
pass
N11_OVERLAPPED4DOLLAR_484DOLLAR_49E._fields_ = [
('Offset', DWORD),
('OffsetHigh', DWORD),
]
PVOID = c_void_p
N11_OVERLAPPED4DOLLAR_48E._anonymous_ = ['_0']
N11_OVERLAPPED4DOLLAR_48E._fields_ = [
('_0', N11_OVERLAPPED4DOLLAR_484DOLLAR_49E),
('Pointer', PVOID),
]
_OVERLAPPED._anonymous_ = ['_0']
_OVERLAPPED._fields_ = [
('Internal', ULONG_PTR),
('InternalHigh', ULONG_PTR),
('_0', N11_OVERLAPPED4DOLLAR_48E),
('hEvent', HANDLE),
]
_SECURITY_ATTRIBUTES._fields_ = [
('nLength', DWORD),
('lpSecurityDescriptor', LPVOID),
('bInheritHandle', BOOL),
]
_COMSTAT._fields_ = [
('fCtsHold', DWORD, 1),
('fDsrHold', DWORD, 1),
('fRlsdHold', DWORD, 1),
('fXoffHold', DWORD, 1),
('fXoffSent', DWORD, 1),
('fEof', DWORD, 1),
('fTxim', DWORD, 1),
('fReserved', DWORD, 25),
('cbInQue', DWORD),
('cbOutQue', DWORD),
]
_DCB._fields_ = [
('DCBlength', DWORD),
('BaudRate', DWORD),
('fBinary', DWORD, 1),
('fParity', DWORD, 1),
('fOutxCtsFlow', DWORD, 1),
('fOutxDsrFlow', DWORD, 1),
('fDtrControl', DWORD, 2),
('fDsrSensitivity', DWORD, 1),
('fTXContinueOnXoff', DWORD, 1),
('fOutX', DWORD, 1),
('fInX', DWORD, 1),
('fErrorChar', DWORD, 1),
('fNull', DWORD, 1),
('fRtsControl', DWORD, 2),
('fAbortOnError', DWORD, 1),
('fDummy2', DWORD, 17),
('wReserved', WORD),
('XonLim', WORD),
('XoffLim', WORD),
('ByteSize', BYTE),
('Parity', BYTE),
('StopBits', BYTE),
('XonChar', c_char),
('XoffChar', c_char),
('ErrorChar', c_char),
('EofChar', c_char),
('EvtChar', c_char),
('wReserved1', WORD),
]
_COMMTIMEOUTS._fields_ = [
('ReadIntervalTimeout', DWORD),
('ReadTotalTimeoutMultiplier', DWORD),
('ReadTotalTimeoutConstant', DWORD),
('WriteTotalTimeoutMultiplier', DWORD),
('WriteTotalTimeoutConstant', DWORD),
]
__all__ = ['GetLastError', 'MS_CTS_ON', 'FILE_ATTRIBUTE_NORMAL',
'DTR_CONTROL_ENABLE', '_COMSTAT', 'MS_RLSD_ON',
'GetOverlappedResult', 'SETXON', 'PURGE_TXABORT',
'PurgeComm', 'N11_OVERLAPPED4DOLLAR_48E', 'EV_RING',
'ONESTOPBIT', 'SETXOFF', 'PURGE_RXABORT', 'GetCommState',
'RTS_CONTROL_ENABLE', '_DCB', 'CreateEvent',
'_COMMTIMEOUTS', '_SECURITY_ATTRIBUTES', 'EV_DSR',
'EV_PERR', 'EV_RXFLAG', 'OPEN_EXISTING', 'DCB',
'FILE_FLAG_OVERLAPPED', 'EV_CTS', 'SetupComm',
'LPOVERLAPPED', 'EV_TXEMPTY', 'ClearCommBreak',
'LPSECURITY_ATTRIBUTES', 'SetCommBreak', 'SetCommTimeouts',
'COMMTIMEOUTS', 'ODDPARITY', 'EV_RLSD',
'GetCommModemStatus', 'EV_EVENT2', 'PURGE_TXCLEAR',
'EV_BREAK', 'EVENPARITY', 'LPCVOID', 'COMSTAT', 'ReadFile',
'PVOID', '_OVERLAPPED', 'WriteFile', 'GetCommTimeouts',
'ResetEvent', 'EV_RXCHAR', 'LPCOMSTAT', 'ClearCommError',
'ERROR_IO_PENDING', 'EscapeCommFunction', 'GENERIC_READ',
'RTS_CONTROL_HANDSHAKE', 'OVERLAPPED',
'DTR_CONTROL_HANDSHAKE', 'PURGE_RXCLEAR', 'GENERIC_WRITE',
'LPDCB', 'CreateEventW', 'SetCommMask', 'EV_EVENT1',
'SetCommState', 'LPVOID', 'CreateFileW', 'LPDWORD',
'EV_RX80FULL', 'TWOSTOPBITS', 'LPCOMMTIMEOUTS', 'MAXDWORD',
'MS_DSR_ON', 'MS_RING_ON',
'N11_OVERLAPPED4DOLLAR_484DOLLAR_49E', 'EV_ERR',
'ULONG_PTR', 'CreateFile', 'NOPARITY', 'CloseHandle']
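# Hedged, Windows-only usage sketch (not part of pyserial's API): open a handle
# to an assumed port name "COM3" with the bindings above, then close it again.
if __name__ == "__main__":
    handle = CreateFile(
        "\\\\.\\COM3",                  # \\.\COM3 device path
        GENERIC_READ | GENERIC_WRITE,
        0,                              # no sharing
        None,                           # default security attributes
        OPEN_EXISTING,
        FILE_ATTRIBUTE_NORMAL,
        0,
    )
    if handle == INVALID_HANDLE_VALUE:
        raise IOError("could not open COM3 (error %d)" % GetLastError())
    CloseHandle(handle)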
| 31.283737
| 91
| 0.725362
|
4a191be9591651eccf940d1c7e92f81aa4a113cf
| 951
|
py
|
Python
|
scanpy/preprocessing/_utils.py
|
ShobiStassen/scanpy
|
44320ea365e6e22a0ff87dbe8be1ebf01d89b5cb
|
[
"BSD-3-Clause"
] | 1
|
2020-01-18T07:48:09.000Z
|
2020-01-18T07:48:09.000Z
|
scanpy/preprocessing/_utils.py
|
ShobiStassen/scanpy
|
44320ea365e6e22a0ff87dbe8be1ebf01d89b5cb
|
[
"BSD-3-Clause"
] | null | null | null |
scanpy/preprocessing/_utils.py
|
ShobiStassen/scanpy
|
44320ea365e6e22a0ff87dbe8be1ebf01d89b5cb
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from scipy.sparse import issparse
STANDARD_SCALER_FIXED = False
def _get_mean_var(X):
    # - using sklearn.StandardScaler throws an error related to
    #   int to long transformation for very large matrices
# - using X.multiply is slower
if not STANDARD_SCALER_FIXED:
mean = X.mean(axis=0)
if issparse(X):
mean_sq = X.multiply(X).mean(axis=0)
mean = mean.A1
mean_sq = mean_sq.A1
else:
mean_sq = np.multiply(X, X).mean(axis=0)
        # enforce R convention (unbiased estimator) for variance
var = (mean_sq - mean ** 2) * (X.shape[0] / (X.shape[0] - 1))
else:
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler(with_mean=False).partial_fit(X)
mean = scaler.mean_
# enforce R convention (unbiased estimator)
var = scaler.var_ * (X.shape[0] / (X.shape[0] - 1))
return mean, var
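# Editor's note (illustrative sketch, not part of scanpy): the manual branch
# above reproduces numpy's unbiased (ddof=1) estimator, e.g.:
#     X = np.array([[1., 2.], [3., 4.], [5., 7.]])
#     mean = X.mean(axis=0)
#     mean_sq = np.multiply(X, X).mean(axis=0)
#     var = (mean_sq - mean ** 2) * (X.shape[0] / (X.shape[0] - 1))
#     np.allclose(var, X.var(axis=0, ddof=1))  # -> True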
| 31.7
| 69
| 0.618297
|
4a191c35e669860db95024dedd522262357f8453
| 119,391
|
py
|
Python
|
object_detection/core/target_assigner_test.py
|
DemonDamon/mask-detection-based-on-tf2odapi
|
192ae544169c1230c21141c033800aa1bd94e9b6
|
[
"MIT"
] | 1
|
2021-05-22T12:50:50.000Z
|
2021-05-22T12:50:50.000Z
|
object_detection/core/target_assigner_test.py
|
DemonDamon/mask-detection-based-on-tf2odapi
|
192ae544169c1230c21141c033800aa1bd94e9b6
|
[
"MIT"
] | null | null | null |
object_detection/core/target_assigner_test.py
|
DemonDamon/mask-detection-based-on-tf2odapi
|
192ae544169c1230c21141c033800aa1bd94e9b6
|
[
"MIT"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.target_assigner."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.box_coders import keypoint_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_list
from object_detection.core import region_similarity_calculator
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner as targetassigner
from object_detection.matchers import argmax_matcher
from object_detection.utils import np_box_ops
from object_detection.utils import test_case
from object_detection.utils import tf_version
class TargetAssignerTest(test_case.TestCase):
def test_assign_agnostic(self):
def graph_fn(anchor_means, groundtruth_box_corners):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(
anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9]],
dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [[1], [1], [1]]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
def test_assign_class_agnostic_with_ignored_matches(self):
    # Note: this test is very similar to the one above. The third anchor box
    # matches with an IOU of 0.35, which lies between the unmatched (0.3) and
    # matched (0.5) thresholds, so, as above, the expected classification
    # targets are [1, 1, 0]. Unlike above, that third match is ignored, so the
    # expected classification weights are [1, 1, 0].
def graph_fn(anchor_means, groundtruth_box_corners):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.3)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(
anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0.0, 0.5, .9, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9]], dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [[1], [1], [0]]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
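  # Editor's note (illustrative arithmetic, not part of the original test): the
  # IOU of 0.35 quoted above can be checked by hand for the third anchor
  # [0.0, 0.5, 0.9, 1.0] against the groundtruth box [0.5, 0.5, 0.9, 0.9]:
  #   intersection = (0.9 - 0.5) * (0.9 - 0.5) = 0.16
  #   union        = 0.9 * 0.5 + 0.4 * 0.4 - 0.16 = 0.45
  #   IOU          = 0.16 / 0.45 ~= 0.356, between 0.3 and 0.5, hence ignored.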
def test_assign_agnostic_with_keypoints(self):
def graph_fn(anchor_means, groundtruth_box_corners,
groundtruth_keypoints):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = keypoint_box_coder.KeypointBoxCoder(
num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0])
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
groundtruth_boxlist.add_field(fields.BoxListFields.keypoints,
groundtruth_keypoints)
result = target_assigner.assign(
anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 1.0],
[0.0, 0.5, .9, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.45, 0.45, 0.95, 0.95]],
dtype=np.float32)
groundtruth_keypoints = np.array(
[[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]],
[[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]],
dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [[1], [1], [1]]
exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13,
-5],
[-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11,
-11, -7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means,
groundtruth_box_corners,
groundtruth_keypoints])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
def test_assign_class_agnostic_with_keypoints_and_ignored_matches(self):
    # Note: this test is very similar to the one above. Here both the matched
    # and unmatched thresholds are 0.5, so the third anchor's overlap with the
    # second groundtruth box (about 0.41) falls below the matched threshold and
    # the anchor is treated as unmatched: the expected classification targets
    # are [1, 1, 0] and the expected classification weights are [1, 1, 1].
def graph_fn(anchor_means, groundtruth_box_corners,
groundtruth_keypoints):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = keypoint_box_coder.KeypointBoxCoder(
num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0])
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
groundtruth_boxlist.add_field(fields.BoxListFields.keypoints,
groundtruth_keypoints)
result = target_assigner.assign(
anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 1.0],
[0.0, 0.5, .9, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.45, 0.45, 0.95, 0.95]],
dtype=np.float32)
groundtruth_keypoints = np.array(
[[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]],
[[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]],
dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [[1], [1], [1]]
exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13,
-5],
[-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11,
-11, -7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means,
groundtruth_box_corners,
groundtruth_keypoints])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
def test_assign_multiclass(self):
def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(
anchors_boxlist,
groundtruth_boxlist,
groundtruth_labels,
unmatched_class_label=unmatched_class_label)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]], dtype=np.float32)
groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0]], dtype=np.float32)
exp_cls_targets = [[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0]]
exp_cls_weights = [[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1]]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0],
[0, 0, -.5, .2]]
exp_reg_weights = [1, 1, 0, 1]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
def test_assign_multiclass_with_groundtruth_weights(self):
def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels,
groundtruth_weights):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(
anchors_boxlist,
groundtruth_boxlist,
groundtruth_labels,
unmatched_class_label=unmatched_class_label,
groundtruth_weights=groundtruth_weights)
(_, cls_weights, _, reg_weights, _) = result
return (cls_weights, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]], dtype=np.float32)
groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0]], dtype=np.float32)
groundtruth_weights = np.array([0.3, 0., 0.5], dtype=np.float32)
# background class gets weight of 1.
exp_cls_weights = [[0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1],
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]
exp_reg_weights = [0.3, 0., 0., 0.5] # background class gets weight of 0.
(cls_weights_out, reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_box_corners, groundtruth_labels,
groundtruth_weights
])
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_assign_multidimensional_class_targets(self):
def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
unmatched_class_label = tf.constant([[0, 0], [0, 0]], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(
anchors_boxlist,
groundtruth_boxlist,
groundtruth_labels,
unmatched_class_label=unmatched_class_label)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]], dtype=np.float32)
groundtruth_labels = np.array([[[0, 1], [1, 0]],
[[1, 0], [0, 1]],
[[0, 1], [1, .5]]], np.float32)
exp_cls_targets = [[[0, 1], [1, 0]],
[[1, 0], [0, 1]],
[[0, 0], [0, 0]],
[[0, 1], [1, .5]]]
exp_cls_weights = [[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[1, 1], [1, 1]]]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0],
[0, 0, -.5, .2]]
exp_reg_weights = [1, 1, 0, 1]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
def test_assign_empty_groundtruth(self):
def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
unmatched_class_label = tf.constant([0, 0, 0], tf.float32)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
result = target_assigner.assign(
anchors_boxlist,
groundtruth_boxlist,
groundtruth_labels,
unmatched_class_label=unmatched_class_label)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32)
groundtruth_labels = np.zeros((0, 3), dtype=np.float32)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]],
dtype=np.float32)
exp_cls_targets = [[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]
exp_cls_weights = [[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
exp_reg_weights = [0, 0, 0, 0]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
def test_raises_error_on_incompatible_groundtruth_boxes_and_labels(self):
similarity_calc = region_similarity_calculator.NegSqDistSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]])
priors = box_list.BoxList(prior_means)
box_corners = [[0.0, 0.0, 0.5, 0.5],
[0.0, 0.0, 0.5, 0.8],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]]
boxes = box_list.BoxList(tf.constant(box_corners))
groundtruth_labels = tf.constant([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0]], tf.float32)
with self.assertRaisesRegexp(ValueError, 'Unequal shapes'):
target_assigner.assign(
priors,
boxes,
groundtruth_labels,
unmatched_class_label=unmatched_class_label)
def test_raises_error_on_invalid_groundtruth_labels(self):
similarity_calc = region_similarity_calculator.NegSqDistSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=1.0)
unmatched_class_label = tf.constant([[0, 0], [0, 0], [0, 0]], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5]])
priors = box_list.BoxList(prior_means)
box_corners = [[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]]
boxes = box_list.BoxList(tf.constant(box_corners))
groundtruth_labels = tf.constant([[[0, 1], [1, 0]]], tf.float32)
with self.assertRaises(ValueError):
target_assigner.assign(
priors,
boxes,
groundtruth_labels,
unmatched_class_label=unmatched_class_label)
class BatchTargetAssignerTest(test_case.TestCase):
def _get_target_assigner(self):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
return targetassigner.TargetAssigner(similarity_calc, matcher, box_coder)
def test_batch_assign_targets(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [None, None]
anchors_boxlist = box_list.BoxList(anchor_means)
agnostic_target_assigner = self._get_target_assigner()
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
agnostic_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[1], [0], [0], [0]],
[[0], [1], [1], [0]]]
exp_cls_weights = [[[1], [1], [1], [1]],
[[1], [1], [1], [1]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_boxlist1, groundtruth_boxlist2])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_multiclass_targets(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [class_targets1, class_targets2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
num_classes = 3
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets, unmatched_class_label)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, 1, 0]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]],
[[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]],
[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2
])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_multiclass_targets_with_padded_groundtruth(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2, groundtruth_weights1,
groundtruth_weights2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [class_targets1, class_targets2]
gt_weights = [groundtruth_weights1, groundtruth_weights2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
num_classes = 3
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets, unmatched_class_label, gt_weights)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2],
[0., 0., 0., 0.]], dtype=np.float32)
groundtruth_weights1 = np.array([1, 0], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842],
[0, 0, 0, 0]],
dtype=np.float32)
groundtruth_weights2 = np.array([1, 1, 0], dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 0, 0, 0]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]],
[[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]],
[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2, groundtruth_weights1,
groundtruth_weights2
])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_multidimensional_targets(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [class_targets1, class_targets2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
target_dimensions = (2, 3)
unmatched_class_label = tf.constant(np.zeros(target_dimensions),
tf.float32)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets, unmatched_class_label)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
class_targets1 = np.array([[[0, 1, 1],
[1, 1, 0]]], dtype=np.float32)
class_targets2 = np.array([[[0, 1, 1],
[1, 1, 0]],
[[0, 0, 1],
[0, 0, 1]]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[[0., 1., 1.],
[1., 1., 0.]],
[[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.]]],
[[[0., 0., 0.],
[0., 0., 0.]],
[[0., 1., 1.],
[1., 1., 0.]],
[[0., 0., 1.],
[0., 0., 1.]],
[[0., 0., 0.],
[0., 0., 0.]]]]
exp_cls_weights = [[[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]]],
[[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2
])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_empty_groundtruth(self):
def graph_fn(anchor_means, groundtruth_box_corners, gt_class_targets):
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
gt_box_batch = [groundtruth_boxlist]
gt_class_targets_batch = [gt_class_targets]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
num_classes = 3
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist,
gt_box_batch, gt_class_targets_batch, unmatched_class_label)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[1, 0, 0, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[1, 1, 1, 1],
[1, 1, 1, 1]]]
exp_reg_targets = [[[0, 0, 0, 0],
[0, 0, 0, 0]]]
exp_reg_weights = [[0, 0]]
num_classes = 3
pad = 1
gt_class_targets = np.zeros((0, num_classes + pad), dtype=np.float32)
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners, gt_class_targets])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
class BatchGetTargetsTest(test_case.TestCase):
def test_scalar_targets(self):
batch_match = np.array([[1, 0, 1],
[-2, -1, 1]], dtype=np.int32)
groundtruth_tensors_list = np.array([[11, 12], [13, 14]], dtype=np.int32)
groundtruth_weights_list = np.array([[1.0, 1.0], [1.0, 0.5]],
dtype=np.float32)
unmatched_value = np.array(99, dtype=np.int32)
unmatched_weight = np.array(0.0, dtype=np.float32)
def graph_fn(batch_match, groundtruth_tensors_list,
groundtruth_weights_list, unmatched_value, unmatched_weight):
targets, weights = targetassigner.batch_get_targets(
batch_match, tf.unstack(groundtruth_tensors_list),
tf.unstack(groundtruth_weights_list),
unmatched_value, unmatched_weight)
return (targets, weights)
(targets_np, weights_np) = self.execute(graph_fn, [
batch_match, groundtruth_tensors_list, groundtruth_weights_list,
unmatched_value, unmatched_weight
])
self.assertAllEqual([[12, 11, 12],
[99, 99, 14]], targets_np)
self.assertAllClose([[1.0, 1.0, 1.0],
[0.0, 0.0, 0.5]], weights_np)
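  # Editor's note (rough numpy analogue, hedged; not the library implementation):
  # batch_get_targets gathers groundtruth by match index, with negative match
  # values falling back to the unmatched value/weight, roughly:
  #     targets = np.where(match >= 0, gt[np.clip(match, 0, None)], unmatched)
  # e.g. the second row match [-2, -1, 1] with gt [13, 14] gives [99, 99, 14]
  # and weights [0.0, 0.0, 0.5], matching the expectations above.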
def test_1d_targets(self):
batch_match = np.array([[1, 0, 1],
[-2, -1, 1]], dtype=np.int32)
groundtruth_tensors_list = np.array([[[11, 12], [12, 13]],
[[13, 14], [14, 15]]],
dtype=np.float32)
groundtruth_weights_list = np.array([[1.0, 1.0], [1.0, 0.5]],
dtype=np.float32)
unmatched_value = np.array([99, 99], dtype=np.float32)
unmatched_weight = np.array(0.0, dtype=np.float32)
def graph_fn(batch_match, groundtruth_tensors_list,
groundtruth_weights_list, unmatched_value, unmatched_weight):
targets, weights = targetassigner.batch_get_targets(
batch_match, tf.unstack(groundtruth_tensors_list),
tf.unstack(groundtruth_weights_list),
unmatched_value, unmatched_weight)
return (targets, weights)
(targets_np, weights_np) = self.execute(graph_fn, [
batch_match, groundtruth_tensors_list, groundtruth_weights_list,
unmatched_value, unmatched_weight
])
self.assertAllClose([[[12, 13], [11, 12], [12, 13]],
[[99, 99], [99, 99], [14, 15]]], targets_np)
self.assertAllClose([[1.0, 1.0, 1.0],
[0.0, 0.0, 0.5]], weights_np)
class BatchTargetAssignConfidencesTest(test_case.TestCase):
def _get_target_assigner(self):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
return targetassigner.TargetAssigner(similarity_calc, matcher, box_coder)
def test_batch_assign_empty_groundtruth(self):
def graph_fn(anchor_means, groundtruth_box_corners, gt_class_confidences):
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
gt_box_batch = [groundtruth_boxlist]
gt_class_confidences_batch = [gt_class_confidences]
anchors_boxlist = box_list.BoxList(anchor_means)
num_classes = 3
implicit_class_weight = 0.5
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
multiclass_target_assigner = self._get_target_assigner()
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_confidences(
multiclass_target_assigner,
anchors_boxlist,
gt_box_batch,
gt_class_confidences_batch,
unmatched_class_label=unmatched_class_label,
include_background_class=True,
implicit_class_weight=implicit_class_weight)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1]], dtype=np.float32)
num_classes = 3
pad = 1
gt_class_confidences = np.zeros((0, num_classes + pad), dtype=np.float32)
exp_cls_targets = [[[1, 0, 0, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5]]]
exp_reg_targets = [[[0, 0, 0, 0],
[0, 0, 0, 0]]]
exp_reg_weights = [[0, 0]]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn,
[anchor_means, groundtruth_box_corners, gt_class_confidences])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_confidences_agnostic(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_confidences_batch = [None, None]
anchors_boxlist = box_list.BoxList(anchor_means)
agnostic_target_assigner = self._get_target_assigner()
implicit_class_weight = 0.5
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_confidences(
agnostic_target_assigner,
anchors_boxlist,
gt_box_batch,
gt_class_confidences_batch,
include_background_class=False,
implicit_class_weight=implicit_class_weight)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[1], [0], [0], [0]],
[[0], [1], [1], [0]]]
exp_cls_weights = [[[1], [0.5], [0.5], [0.5]],
[[0.5], [1], [1], [0.5]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_boxlist1, groundtruth_boxlist2])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_confidences_multiclass(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_confidences_batch = [class_targets1, class_targets2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
num_classes = 3
implicit_class_weight = 0.5
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_confidences(
multiclass_target_assigner,
anchors_boxlist,
gt_box_batch,
gt_class_confidences_batch,
unmatched_class_label=unmatched_class_label,
include_background_class=True,
implicit_class_weight=implicit_class_weight)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, -1, 0]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]],
[[1, 0, 0, 0],
[0, 0, 0, 1],
[1, 0, 0, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[1, 1, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5]],
[[0.5, 0.5, 0.5, 0.5],
[1, 0.5, 0.5, 1],
[0.5, 0.5, 1, 0.5],
[0.5, 0.5, 0.5, 0.5]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 0, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2
])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
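  # Editor's note (hedged reading of the expectations above): class_targets2
  # uses -1 as an explicit negative label; that entry keeps class weight 1,
  # implicitly unlabelled entries fall back to implicit_class_weight=0.5, and
  # the anchor matched to that groundtruth box receives the background target
  # with a regression weight of 0.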
def test_batch_assign_confidences_multiclass_with_padded_groundtruth(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2, groundtruth_weights1,
groundtruth_weights2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_confidences_batch = [class_targets1, class_targets2]
gt_weights = [groundtruth_weights1, groundtruth_weights2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
num_classes = 3
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
implicit_class_weight = 0.5
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_confidences(
multiclass_target_assigner,
anchors_boxlist,
gt_box_batch,
gt_class_confidences_batch,
gt_weights,
unmatched_class_label=unmatched_class_label,
include_background_class=True,
implicit_class_weight=implicit_class_weight)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2],
[0., 0., 0., 0.]], dtype=np.float32)
groundtruth_weights1 = np.array([1, 0], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842],
[0, 0, 0, 0]],
dtype=np.float32)
groundtruth_weights2 = np.array([1, 1, 0], dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, -1, 0],
[0, 0, 0, 0]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]],
[[1, 0, 0, 0],
[0, 0, 0, 1],
[1, 0, 0, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[1, 1, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5]],
[[0.5, 0.5, 0.5, 0.5],
[1, 0.5, 0.5, 1],
[0.5, 0.5, 1, 0.5],
[0.5, 0.5, 0.5, 0.5]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 0, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2, groundtruth_weights1,
groundtruth_weights2
])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_confidences_multidimensional(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_confidences_batch = [class_targets1, class_targets2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
target_dimensions = (2, 3)
unmatched_class_label = tf.constant(np.zeros(target_dimensions),
tf.float32)
implicit_class_weight = 0.5
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_confidences(
multiclass_target_assigner,
anchors_boxlist,
gt_box_batch,
gt_class_confidences_batch,
unmatched_class_label=unmatched_class_label,
include_background_class=True,
implicit_class_weight=implicit_class_weight)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, 1, 0]], dtype=np.float32)
class_targets1 = np.array([[[0, 1, 1],
[1, 1, 0]]], dtype=np.float32)
class_targets2 = np.array([[[0, 1, 1],
[1, 1, 0]],
[[0, 0, 1],
[0, 0, 1]]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
with self.assertRaises(ValueError):
_, _, _, _ = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2
])
class CreateTargetAssignerTest(test_case.TestCase):
def test_create_target_assigner(self):
"""Tests that named constructor gives working target assigners.
TODO(rathodv): Make this test more general.
"""
corners = [[0.0, 0.0, 1.0, 1.0]]
groundtruth = box_list.BoxList(tf.constant(corners))
priors = box_list.BoxList(tf.constant(corners))
if tf_version.is_tf1():
multibox_ta = (targetassigner
.create_target_assigner('Multibox', stage='proposal'))
multibox_ta.assign(priors, groundtruth)
# No tests on output, as that may vary arbitrarily as new target assigners
# are added. As long as it is constructed correctly and runs without errors,
# tests on the individual assigners cover correctness of the assignments.
anchors = box_list.BoxList(tf.constant(corners))
faster_rcnn_proposals_ta = (targetassigner
.create_target_assigner('FasterRCNN',
stage='proposal'))
faster_rcnn_proposals_ta.assign(anchors, groundtruth)
fast_rcnn_ta = (targetassigner
.create_target_assigner('FastRCNN'))
fast_rcnn_ta.assign(anchors, groundtruth)
faster_rcnn_detection_ta = (targetassigner
.create_target_assigner('FasterRCNN',
stage='detection'))
faster_rcnn_detection_ta.assign(anchors, groundtruth)
with self.assertRaises(ValueError):
targetassigner.create_target_assigner('InvalidDetector',
stage='invalid_stage')
def _array_argmax(array):
return np.unravel_index(np.argmax(array), array.shape)
class CenterNetCenterHeatmapTargetAssignerTest(test_case.TestCase,
parameterized.TestCase):
def setUp(self):
super(CenterNetCenterHeatmapTargetAssignerTest, self).setUp()
self._box_center = [0.0, 0.0, 1.0, 1.0]
self._box_center_small = [0.25, 0.25, 0.75, 0.75]
self._box_lower_left = [0.5, 0.0, 1.0, 0.5]
self._box_center_offset = [0.1, 0.05, 1.0, 1.0]
self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625]
def test_center_location(self):
"""Test that the centers are at the correct location."""
def graph_fn():
box_batch = [tf.constant([self._box_center, self._box_lower_left])]
classes = [
tf.one_hot([0, 1], depth=4),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
classes)
return targets
targets = self.execute(graph_fn, [])
self.assertEqual((10, 10), _array_argmax(targets[0, :, :, 0]))
self.assertAlmostEqual(1.0, targets[0, 10, 10, 0])
self.assertEqual((15, 5), _array_argmax(targets[0, :, :, 1]))
self.assertAlmostEqual(1.0, targets[0, 15, 5, 1])
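  # Editor's note (illustrative arithmetic): with stride 4 on an 80x80 image the
  # heatmap is 20x20; self._box_center = [0, 0, 1, 1] has its center at
  # (0.5, 0.5) * 80 / 4 = (10, 10) and self._box_lower_left = [0.5, 0, 1, 0.5]
  # at (0.75, 0.25) * 80 / 4 = (15, 5), matching the argmax checks above.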
@parameterized.parameters(
{'keypoint_weights_for_center': [1.0, 1.0, 1.0, 1.0]},
{'keypoint_weights_for_center': [0.0, 0.0, 1.0, 1.0]},
)
def test_center_location_by_keypoints(self, keypoint_weights_for_center):
"""Test that the centers are at the correct location."""
kpts_y = [[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8], [0.0, 0.0, 0.0, 0.0]]
kpts_x = [[0.5, 0.6, 0.7, 0.8], [0.1, 0.2, 0.3, 0.4], [0.0, 0.0, 0.0, 0.0]]
gt_keypoints_list = [
tf.stack([tf.constant(kpts_y), tf.constant(kpts_x)], axis=2)
]
kpts_weight = [[1.0, 1.0, 1.0, 1.0], [1.0, 0.0, 1.0, 0.0],
[1.0, 0.0, 1.0, 0.0]]
gt_keypoints_weights_list = [tf.constant(kpts_weight)]
gt_classes_list = [
tf.one_hot([0, 0, 0], depth=1),
]
gt_weights_list = [tf.constant([1.0, 1.0, 0.0])]
def graph_fn():
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(
4,
keypoint_class_id=0,
keypoint_indices=[0, 1, 2, 3],
keypoint_weights_for_center=keypoint_weights_for_center)
targets = assigner.assign_center_targets_from_keypoints(
80,
80,
gt_classes_list=gt_classes_list,
gt_keypoints_list=gt_keypoints_list,
gt_weights_list=gt_weights_list,
gt_keypoints_weights_list=gt_keypoints_weights_list)
return targets
targets = self.execute(graph_fn, [])
if sum(keypoint_weights_for_center) == 4.0:
      # There should be two peaks, at locations (5, 13) and (12, 4).
# (5, 13) = ((0.1 + 0.2 + 0.3 + 0.4) / 4 * 80 / 4,
# (0.5 + 0.6 + 0.7 + 0.8) / 4 * 80 / 4)
# (12, 4) = ((0.5 + 0.7) / 2 * 80 / 4,
# (0.1 + 0.3) / 2 * 80 / 4)
self.assertEqual((5, 13), _array_argmax(targets[0, :, :, 0]))
self.assertAlmostEqual(1.0, targets[0, 5, 13, 0])
self.assertEqual((1, 20, 20, 1), targets.shape)
targets[0, 5, 13, 0] = 0.0
self.assertEqual((12, 4), _array_argmax(targets[0, :, :, 0]))
self.assertAlmostEqual(1.0, targets[0, 12, 4, 0])
else:
      # There should be two peaks, at locations (7, 15) and (14, 6).
# (7, 15) = ((0.3 + 0.4) / 2 * 80 / 4,
# (0.7 + 0.8) / 2 * 80 / 4)
# (14, 6) = (0.7 * 80 / 4, 0.3 * 80 / 4)
self.assertEqual((7, 15), _array_argmax(targets[0, :, :, 0]))
self.assertAlmostEqual(1.0, targets[0, 7, 15, 0])
self.assertEqual((1, 20, 20, 1), targets.shape)
targets[0, 7, 15, 0] = 0.0
self.assertEqual((14, 6), _array_argmax(targets[0, :, :, 0]))
self.assertAlmostEqual(1.0, targets[0, 14, 6, 0])
def test_center_batch_shape(self):
"""Test that the shape of the target for a batch is correct."""
def graph_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_center]),
tf.constant([self._box_center_small]),
]
classes = [
tf.one_hot([0, 1], depth=4),
tf.one_hot([2], depth=4),
tf.one_hot([3], depth=4),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
classes)
return targets
targets = self.execute(graph_fn, [])
self.assertEqual((3, 20, 20, 4), targets.shape)
def test_center_overlap_maximum(self):
"""Test that when boxes overlap we, are computing the maximum."""
def graph_fn():
box_batch = [
tf.constant([
self._box_center, self._box_center_offset, self._box_center,
self._box_center_offset
])
]
classes = [
tf.one_hot([0, 0, 1, 2], depth=4),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
classes)
return targets
targets = self.execute(graph_fn, [])
class0_targets = targets[0, :, :, 0]
class1_targets = targets[0, :, :, 1]
class2_targets = targets[0, :, :, 2]
np.testing.assert_allclose(class0_targets,
np.maximum(class1_targets, class2_targets))
def test_size_blur(self):
"""Test that the heatmap of a larger box is more blurred."""
def graph_fn():
box_batch = [tf.constant([self._box_center, self._box_center_small])]
classes = [
tf.one_hot([0, 1], depth=4),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
classes)
return targets
targets = self.execute(graph_fn, [])
self.assertGreater(
np.count_nonzero(targets[:, :, :, 0]),
np.count_nonzero(targets[:, :, :, 1]))
def test_weights(self):
"""Test that the weights correctly ignore ground truth."""
def graph1_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_center]),
tf.constant([self._box_center_small]),
]
classes = [
tf.one_hot([0, 1], depth=4),
tf.one_hot([2], depth=4),
tf.one_hot([3], depth=4),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
classes)
return targets
targets = self.execute(graph1_fn, [])
self.assertAlmostEqual(1.0, targets[0, :, :, 0].max())
self.assertAlmostEqual(1.0, targets[0, :, :, 1].max())
self.assertAlmostEqual(1.0, targets[1, :, :, 2].max())
self.assertAlmostEqual(1.0, targets[2, :, :, 3].max())
self.assertAlmostEqual(0.0, targets[0, :, :, [2, 3]].max())
self.assertAlmostEqual(0.0, targets[1, :, :, [0, 1, 3]].max())
self.assertAlmostEqual(0.0, targets[2, :, :, :3].max())
def graph2_fn():
weights = [
tf.constant([0., 1.]),
tf.constant([1.]),
tf.constant([1.]),
]
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_center]),
tf.constant([self._box_center_small]),
]
classes = [
tf.one_hot([0, 1], depth=4),
tf.one_hot([2], depth=4),
tf.one_hot([3], depth=4),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(4)
targets = assigner.assign_center_targets_from_boxes(80, 80, box_batch,
classes,
weights)
return targets
targets = self.execute(graph2_fn, [])
self.assertAlmostEqual(1.0, targets[0, :, :, 1].max())
self.assertAlmostEqual(1.0, targets[1, :, :, 2].max())
self.assertAlmostEqual(1.0, targets[2, :, :, 3].max())
self.assertAlmostEqual(0.0, targets[0, :, :, [0, 2, 3]].max())
self.assertAlmostEqual(0.0, targets[1, :, :, [0, 1, 3]].max())
self.assertAlmostEqual(0.0, targets[2, :, :, :3].max())
def test_low_overlap(self):
def graph1_fn():
box_batch = [tf.constant([self._box_center])]
classes = [
tf.one_hot([0], depth=2),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(
4, min_overlap=0.1)
targets_low_overlap = assigner.assign_center_targets_from_boxes(
80, 80, box_batch, classes)
return targets_low_overlap
targets_low_overlap = self.execute(graph1_fn, [])
self.assertLess(1, np.count_nonzero(targets_low_overlap))
def graph2_fn():
box_batch = [tf.constant([self._box_center])]
classes = [
tf.one_hot([0], depth=2),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(
4, min_overlap=0.6)
targets_medium_overlap = assigner.assign_center_targets_from_boxes(
80, 80, box_batch, classes)
return targets_medium_overlap
targets_medium_overlap = self.execute(graph2_fn, [])
self.assertLess(1, np.count_nonzero(targets_medium_overlap))
def graph3_fn():
box_batch = [tf.constant([self._box_center])]
classes = [
tf.one_hot([0], depth=2),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(
4, min_overlap=0.99)
targets_high_overlap = assigner.assign_center_targets_from_boxes(
80, 80, box_batch, classes)
return targets_high_overlap
targets_high_overlap = self.execute(graph3_fn, [])
self.assertTrue(np.all(targets_low_overlap >= targets_medium_overlap))
self.assertTrue(np.all(targets_medium_overlap >= targets_high_overlap))
def test_empty_box_list(self):
"""Test that an empty box list gives an all 0 heatmap."""
def graph_fn():
box_batch = [
tf.zeros((0, 4), dtype=tf.float32),
]
classes = [
tf.zeros((0, 5), dtype=tf.float32),
]
assigner = targetassigner.CenterNetCenterHeatmapTargetAssigner(
4, min_overlap=0.1)
targets = assigner.assign_center_targets_from_boxes(
80, 80, box_batch, classes)
return targets
targets = self.execute(graph_fn, [])
np.testing.assert_allclose(targets, 0.)
class CenterNetBoxTargetAssignerTest(test_case.TestCase):
def setUp(self):
super(CenterNetBoxTargetAssignerTest, self).setUp()
self._box_center = [0.0, 0.0, 1.0, 1.0]
self._box_center_small = [0.25, 0.25, 0.75, 0.75]
self._box_lower_left = [0.5, 0.0, 1.0, 0.5]
self._box_center_offset = [0.1, 0.05, 1.0, 1.0]
self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625]
def test_max_distance_for_overlap(self):
"""Test that the distance ensures the IoU with random boxes."""
# TODO(vighneshb) remove this after the `_smallest_positive_root`
# function if fixed.
self.skipTest(('Skipping test because we are using an incorrect version of'
'the `max_distance_for_overlap` function to reproduce'
' results.'))
rng = np.random.RandomState(0)
n_samples = 100
width = rng.uniform(1, 100, size=n_samples)
height = rng.uniform(1, 100, size=n_samples)
min_iou = rng.uniform(0.1, 1.0, size=n_samples)
def graph_fn():
max_dist = targetassigner.max_distance_for_overlap(height, width, min_iou)
return max_dist
max_dist = self.execute(graph_fn, [])
xmin1 = np.zeros(n_samples)
ymin1 = np.zeros(n_samples)
xmax1 = np.zeros(n_samples) + width
ymax1 = np.zeros(n_samples) + height
xmin2 = max_dist * np.cos(rng.uniform(0, 2 * np.pi))
ymin2 = max_dist * np.sin(rng.uniform(0, 2 * np.pi))
xmax2 = width + max_dist * np.cos(rng.uniform(0, 2 * np.pi))
ymax2 = height + max_dist * np.sin(rng.uniform(0, 2 * np.pi))
boxes1 = np.vstack([ymin1, xmin1, ymax1, xmax1]).T
boxes2 = np.vstack([ymin2, xmin2, ymax2, xmax2]).T
iou = np.diag(np_box_ops.iou(boxes1, boxes2))
self.assertTrue(np.all(iou >= min_iou))
def test_max_distance_for_overlap_centernet(self):
"""Test the version of the function used in the CenterNet paper."""
def graph_fn():
distance = targetassigner.max_distance_for_overlap(10, 5, 0.5)
return distance
distance = self.execute(graph_fn, [])
self.assertAlmostEqual(2.807764064, distance)
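  # Editor's note (hedged sketch; the exact coefficients are an assumption based
  # on the CenterNet-paper radius derivation this test refers to): 2.807764064
  # is consistent with the `_smallest_positive_root` quirk mentioned in the TODO
  # above, i.e. taking (-b + sqrt(b**2 - 4*a*c)) / 2 (dividing by 2 rather than
  # 2*a) with a = 4 * 0.5, b = 2 * 0.5 * (10 + 5), c = (0.5 - 1) * 10 * 5,
  # which gives (-15 + sqrt(425)) / 2 ~= 2.8078.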
def test_assign_size_and_offset_targets(self):
"""Test the assign_size_and_offset_targets function."""
def graph_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_center_offset]),
tf.constant([self._box_center_small, self._box_odd_coordinates]),
]
assigner = targetassigner.CenterNetBoxTargetAssigner(4)
indices, hw, yx_offset, weights = assigner.assign_size_and_offset_targets(
80, 80, box_batch)
return indices, hw, yx_offset, weights
indices, hw, yx_offset, weights = self.execute(graph_fn, [])
self.assertEqual(indices.shape, (5, 3))
self.assertEqual(hw.shape, (5, 2))
self.assertEqual(yx_offset.shape, (5, 2))
self.assertEqual(weights.shape, (5,))
np.testing.assert_array_equal(
indices,
[[0, 10, 10], [0, 15, 5], [1, 11, 10], [2, 10, 10], [2, 7, 11]])
np.testing.assert_array_equal(
hw, [[20, 20], [10, 10], [18, 19], [10, 10], [8, 15]])
np.testing.assert_array_equal(
yx_offset, [[0, 0], [0, 0], [0, 0.5], [0, 0], [0.25, 0.75]])
np.testing.assert_array_equal(weights, 1)
def test_assign_size_and_offset_targets_weights(self):
"""Test the assign_size_and_offset_targets function with box weights."""
def graph_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_lower_left, self._box_center_small]),
tf.constant([self._box_center_small, self._box_odd_coordinates]),
]
cn_assigner = targetassigner.CenterNetBoxTargetAssigner(4)
weights_batch = [
tf.constant([0.0, 1.0]),
tf.constant([1.0, 1.0]),
tf.constant([0.0, 0.0])
]
indices, hw, yx_offset, weights = cn_assigner.assign_size_and_offset_targets(
80, 80, box_batch, weights_batch)
return indices, hw, yx_offset, weights
indices, hw, yx_offset, weights = self.execute(graph_fn, [])
self.assertEqual(indices.shape, (6, 3))
self.assertEqual(hw.shape, (6, 2))
self.assertEqual(yx_offset.shape, (6, 2))
self.assertEqual(weights.shape, (6,))
np.testing.assert_array_equal(indices,
[[0, 10, 10], [0, 15, 5], [1, 15, 5],
[1, 10, 10], [2, 10, 10], [2, 7, 11]])
np.testing.assert_array_equal(
hw, [[20, 20], [10, 10], [10, 10], [10, 10], [10, 10], [8, 15]])
np.testing.assert_array_equal(
yx_offset, [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0.25, 0.75]])
np.testing.assert_array_equal(weights, [0, 1, 1, 1, 0, 0])
def test_get_batch_predictions_from_indices(self):
"""Test the get_batch_predictions_from_indices function.
    This test verifies that the indices returned by the
    assign_size_and_offset_targets function work as expected with a predicted
    tensor.
"""
def graph_fn():
pred_array = np.ones((2, 40, 20, 2), dtype=np.int32) * -1000
pred_array[0, 20, 10] = [1, 2]
pred_array[0, 30, 5] = [3, 4]
pred_array[1, 20, 10] = [5, 6]
pred_array[1, 14, 11] = [7, 8]
pred_tensor = tf.constant(pred_array)
indices = tf.constant([
[0, 20, 10],
[0, 30, 5],
[1, 20, 10],
[1, 14, 11]
], dtype=tf.int32)
preds = targetassigner.get_batch_predictions_from_indices(
pred_tensor, indices)
return preds
preds = self.execute(graph_fn, [])
np.testing.assert_array_equal(preds, [[1, 2], [3, 4], [5, 6], [7, 8]])
def test_get_batch_predictions_from_indices_with_class(self):
"""Test the get_batch_predictions_from_indices function with class axis.
    This test verifies that the indices returned by the
    assign_size_and_offset_targets function work as expected with a predicted
    tensor.
"""
def graph_fn():
pred_array = np.ones((2, 40, 20, 5, 2), dtype=np.int32) * -1000
pred_array[0, 20, 10, 0] = [1, 2]
pred_array[0, 30, 5, 2] = [3, 4]
pred_array[1, 20, 10, 1] = [5, 6]
pred_array[1, 14, 11, 4] = [7, 8]
pred_tensor = tf.constant(pred_array)
indices = tf.constant([
[0, 20, 10, 0],
[0, 30, 5, 2],
[1, 20, 10, 1],
[1, 14, 11, 4]
], dtype=tf.int32)
preds = targetassigner.get_batch_predictions_from_indices(
pred_tensor, indices)
return preds
preds = self.execute(graph_fn, [])
np.testing.assert_array_equal(preds, [[1, 2], [3, 4], [5, 6], [7, 8]])
class CenterNetKeypointTargetAssignerTest(test_case.TestCase):
def test_keypoint_heatmap_targets(self):
def graph_fn():
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 1.0],
[0.4, 0.1, 0.4, 0.2, 0.1],
[float('nan'), 0.1, 0.5, 0.7, 0.6]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
gt_boxes_list = [
tf.constant(
np.array([[0.0, 0.0, 0.3, 0.3],
[0.0, 0.0, 0.5, 0.5],
[0.0, 0.0, 0.5, 0.5],
[0.0, 0.0, 1.0, 1.0]]),
dtype=tf.float32)
]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
class_id=1,
keypoint_indices=[0, 2])
(targets, num_instances_batch,
valid_mask) = cn_assigner.assign_keypoint_heatmap_targets(
120,
80,
gt_keypoints_list,
gt_classes_list,
gt_boxes_list=gt_boxes_list)
return targets, num_instances_batch, valid_mask
targets, num_instances_batch, valid_mask = self.execute(graph_fn, [])
# keypoint (0.5, 0.5) is selected. The peak is expected to appear at the
# center of the image.
self.assertEqual((15, 10), _array_argmax(targets[0, :, :, 1]))
self.assertAlmostEqual(1.0, targets[0, 15, 10, 1])
# No peak for the first class since NaN is selected.
self.assertAlmostEqual(0.0, targets[0, 15, 10, 0])
# Verify the output heatmap shape.
self.assertAllEqual([1, 30, 20, 2], targets.shape)
# Verify the number of instances is correct.
np.testing.assert_array_almost_equal([[0, 1]],
num_instances_batch)
    # When calling the function, we specify the class id to be 1 (the
    # instances at index 1 and 3) and the keypoint indices to be [0, 2],
    # meaning that the instance at index 1 belongs to the target class but
    # has no valid keypoints. As a result, the region of that instance's
    # bounding box (0.0, 0.0, 0.5, 0.5) should be blacked out, which maps to
    # (0, 0, 15, 10) in absolute output space.
self.assertAlmostEqual(np.sum(valid_mask[:, 0:16, 0:11]), 0.0)
# All other values are 1.0 so the sum is: 30 * 20 - 16 * 11 = 424.
self.assertAlmostEqual(np.sum(valid_mask), 424.0)
def test_assign_keypoints_offset_targets(self):
def graph_fn():
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[float('nan'), 0.0, 0.12, 0.7, 0.4]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
class_id=1,
keypoint_indices=[0, 2])
(indices, offsets, weights) = cn_assigner.assign_keypoints_offset_targets(
height=120,
width=80,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list)
return indices, weights, offsets
indices, weights, offsets = self.execute(graph_fn, [])
# Only the last element has positive weight.
np.testing.assert_array_almost_equal(
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], weights)
# Validate the last element's indices and offsets.
np.testing.assert_array_equal([0, 3, 2], indices[7, :])
np.testing.assert_array_almost_equal([0.6, 0.4], offsets[7, :])
def test_assign_keypoint_depths_target(self):
def graph_fn():
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, 0.7, 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[float('nan'), 0.0, 0.12, 0.7, 0.4]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
depths = tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[0.5, 0.0, 7.0, 0.7, 0.4]]),
dtype=tf.float32)
gt_keypoint_depths_list = [depths]
gt_keypoint_depth_weights = tf.constant(
np.array([[1.0, 1.0, 1.0, 1.0, 1.0],
[float('nan'), 0.0, 1.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 0.5, 1.0, 1.0]]),
dtype=tf.float32)
gt_keypoint_depth_weights_list = [gt_keypoint_depth_weights]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
class_id=1,
keypoint_indices=[0, 2],
peak_radius=1)
(indices, depths, weights) = cn_assigner.assign_keypoints_depth_targets(
height=120,
width=80,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list,
gt_keypoint_depths_list=gt_keypoint_depths_list,
gt_keypoint_depth_weights_list=gt_keypoint_depth_weights_list)
return indices, depths, weights
indices, depths, weights = self.execute(graph_fn, [])
    # Only the last 5 elements have positive weight.
np.testing.assert_array_almost_equal([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.5
], weights)
# Validate the last 5 elements' depth value.
np.testing.assert_array_almost_equal(
[7.0, 7.0, 7.0, 7.0, 7.0], depths[35:, 0])
self.assertEqual((40, 3), indices.shape)
np.testing.assert_array_equal([0, 2, 2], indices[35, :])
def test_assign_keypoint_depths_per_keypoints(self):
def graph_fn():
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, 0.7, 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[float('nan'), 0.0, 0.12, 0.7, 0.4]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
depths = tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[0.5, 0.0, 7.0, 0.7, 0.4]]),
dtype=tf.float32)
gt_keypoint_depths_list = [depths]
gt_keypoint_depth_weights = tf.constant(
np.array([[1.0, 1.0, 1.0, 1.0, 1.0],
[float('nan'), 0.0, 1.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 0.5, 1.0, 1.0]]),
dtype=tf.float32)
gt_keypoint_depth_weights_list = [gt_keypoint_depth_weights]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
class_id=1,
keypoint_indices=[0, 2],
peak_radius=1,
per_keypoint_depth=True)
(indices, depths, weights) = cn_assigner.assign_keypoints_depth_targets(
height=120,
width=80,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list,
gt_keypoint_depths_list=gt_keypoint_depths_list,
gt_keypoint_depth_weights_list=gt_keypoint_depth_weights_list)
return indices, depths, weights
indices, depths, weights = self.execute(graph_fn, [])
    # Only the last 5 elements have positive weight.
np.testing.assert_array_almost_equal([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.5
], weights)
# Validate the last 5 elements' depth value.
np.testing.assert_array_almost_equal(
[7.0, 7.0, 7.0, 7.0, 7.0], depths[35:, 0])
self.assertEqual((40, 4), indices.shape)
np.testing.assert_array_equal([0, 2, 2, 1], indices[35, :])
def test_assign_keypoints_offset_targets_radius(self):
def graph_fn():
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[float('nan'), 0.0, 0.12, 0.7, 0.4]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
class_id=1,
keypoint_indices=[0, 2],
peak_radius=1,
per_keypoint_offset=True)
(indices, offsets, weights) = cn_assigner.assign_keypoints_offset_targets(
height=120,
width=80,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list)
return indices, weights, offsets
indices, weights, offsets = self.execute(graph_fn, [])
    # There are in total 8 * 5 (neighbors) = 40 targets.
self.assertAllEqual(indices.shape, [40, 4])
self.assertAllEqual(offsets.shape, [40, 2])
self.assertAllEqual(weights.shape, [40])
    # Only the last 5 elements (radius 1 generates 5 valid points) have
    # positive weight.
np.testing.assert_array_almost_equal([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0
], weights)
# Validate the last element's (with neighbors) indices and offsets.
np.testing.assert_array_equal([0, 2, 2, 1], indices[35, :])
np.testing.assert_array_equal([0, 3, 1, 1], indices[36, :])
np.testing.assert_array_equal([0, 3, 2, 1], indices[37, :])
np.testing.assert_array_equal([0, 3, 3, 1], indices[38, :])
np.testing.assert_array_equal([0, 4, 2, 1], indices[39, :])
np.testing.assert_array_almost_equal([1.6, 0.4], offsets[35, :])
np.testing.assert_array_almost_equal([0.6, 1.4], offsets[36, :])
np.testing.assert_array_almost_equal([0.6, 0.4], offsets[37, :])
np.testing.assert_array_almost_equal([0.6, -0.6], offsets[38, :])
np.testing.assert_array_almost_equal([-0.4, 0.4], offsets[39, :])
def test_assign_joint_regression_targets(self):
def graph_fn():
gt_boxes_list = [
tf.constant(
np.array([[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0]]),
dtype=tf.float32)
]
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[float('nan'), 0.0, 0.12, 0.7, 0.4]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
class_id=1,
keypoint_indices=[0, 2])
(indices, offsets, weights) = cn_assigner.assign_joint_regression_targets(
height=120,
width=80,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list,
gt_boxes_list=gt_boxes_list)
return indices, offsets, weights
indices, offsets, weights = self.execute(graph_fn, [])
np.testing.assert_array_almost_equal(
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], weights)
np.testing.assert_array_equal([0, 15, 10, 1], indices[7, :])
np.testing.assert_array_almost_equal([-11.4, -7.6], offsets[7, :])
def test_assign_joint_regression_targets_radius(self):
def graph_fn():
gt_boxes_list = [
tf.constant(
np.array([[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0]]),
dtype=tf.float32)
]
gt_classes_list = [
tf.one_hot([0, 1, 0, 1], depth=4),
]
coordinates = tf.expand_dims(
tf.constant(
np.array([[0.1, 0.2, 0.3, 0.4, 0.5],
[float('nan'), 0.7, float('nan'), 0.9, 0.4],
[0.4, 0.1, 0.4, 0.2, 0.0],
[float('nan'), 0.0, 0.12, 0.7, 0.4]]),
dtype=tf.float32),
axis=2)
gt_keypoints_list = [tf.concat([coordinates, coordinates], axis=2)]
cn_assigner = targetassigner.CenterNetKeypointTargetAssigner(
stride=4,
class_id=1,
keypoint_indices=[0, 2],
peak_radius=1)
(indices, offsets, weights) = cn_assigner.assign_joint_regression_targets(
height=120,
width=80,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list,
gt_boxes_list=gt_boxes_list)
return indices, offsets, weights
indices, offsets, weights = self.execute(graph_fn, [])
    # There are in total 8 * 5 (neighbors) = 40 targets.
self.assertAllEqual(indices.shape, [40, 4])
self.assertAllEqual(offsets.shape, [40, 2])
self.assertAllEqual(weights.shape, [40])
    # Only the last 5 elements (radius 1 generates 5 valid points) have
    # positive weight.
np.testing.assert_array_almost_equal([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0
], weights)
# Test the values of the indices and offsets of the last 5 elements.
np.testing.assert_array_equal([0, 14, 10, 1], indices[35, :])
np.testing.assert_array_equal([0, 15, 9, 1], indices[36, :])
np.testing.assert_array_equal([0, 15, 10, 1], indices[37, :])
np.testing.assert_array_equal([0, 15, 11, 1], indices[38, :])
np.testing.assert_array_equal([0, 16, 10, 1], indices[39, :])
np.testing.assert_array_almost_equal([-10.4, -7.6], offsets[35, :])
np.testing.assert_array_almost_equal([-11.4, -6.6], offsets[36, :])
np.testing.assert_array_almost_equal([-11.4, -7.6], offsets[37, :])
np.testing.assert_array_almost_equal([-11.4, -8.6], offsets[38, :])
np.testing.assert_array_almost_equal([-12.4, -7.6], offsets[39, :])
class CenterNetMaskTargetAssignerTest(test_case.TestCase):
def test_assign_segmentation_targets(self):
def graph_fn():
gt_masks_list = [
# Example 0.
tf.constant([
[
[1., 0., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
],
[
[0., 0., 0., 0.],
[0., 0., 0., 1.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
],
[
[1., 1., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 1., 1.],
[0., 0., 1., 1.],
]
], dtype=tf.float32),
# Example 1.
tf.constant([
[
[1., 1., 0., 1.],
[1., 1., 1., 1.],
[0., 0., 1., 1.],
[0., 0., 0., 1.],
],
[
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[1., 1., 0., 0.],
[1., 1., 0., 0.],
],
], dtype=tf.float32),
]
gt_classes_list = [
# Example 0.
tf.constant([[1., 0., 0.],
[0., 1., 0.],
[1., 0., 0.]], dtype=tf.float32),
# Example 1.
tf.constant([[0., 1., 0.],
[0., 1., 0.]], dtype=tf.float32)
]
cn_assigner = targetassigner.CenterNetMaskTargetAssigner(stride=2)
segmentation_target = cn_assigner.assign_segmentation_targets(
gt_masks_list=gt_masks_list,
gt_classes_list=gt_classes_list,
mask_resize_method=targetassigner.ResizeMethod.NEAREST_NEIGHBOR)
return segmentation_target
segmentation_target = self.execute(graph_fn, [])
expected_seg_target = np.array([
# Example 0 [[class 0, class 1], [background, class 0]]
[[[1, 0, 0], [0, 1, 0]],
[[0, 0, 0], [1, 0, 0]]],
# Example 1 [[class 1, class 1], [class 1, class 1]]
[[[0, 1, 0], [0, 1, 0]],
[[0, 1, 0], [0, 1, 0]]],
], dtype=np.float32)
np.testing.assert_array_almost_equal(
expected_seg_target, segmentation_target)
def test_assign_segmentation_targets_no_objects(self):
def graph_fn():
gt_masks_list = [tf.zeros((0, 5, 5))]
gt_classes_list = [tf.zeros((0, 10))]
cn_assigner = targetassigner.CenterNetMaskTargetAssigner(stride=1)
segmentation_target = cn_assigner.assign_segmentation_targets(
gt_masks_list=gt_masks_list,
gt_classes_list=gt_classes_list,
mask_resize_method=targetassigner.ResizeMethod.NEAREST_NEIGHBOR)
return segmentation_target
segmentation_target = self.execute(graph_fn, [])
expected_seg_target = np.zeros((1, 5, 5, 10))
np.testing.assert_array_almost_equal(
expected_seg_target, segmentation_target)
class CenterNetDensePoseTargetAssignerTest(test_case.TestCase):
def test_assign_part_and_coordinate_targets(self):
def graph_fn():
gt_dp_num_points_list = [
# Example 0.
tf.constant([2, 0, 3], dtype=tf.int32),
# Example 1.
tf.constant([1, 1], dtype=tf.int32),
]
gt_dp_part_ids_list = [
# Example 0.
tf.constant([[1, 6, 0],
[0, 0, 0],
[0, 2, 3]], dtype=tf.int32),
# Example 1.
tf.constant([[7, 0, 0],
[0, 0, 0]], dtype=tf.int32),
]
gt_dp_surface_coords_list = [
# Example 0.
tf.constant(
[[[0.11, 0.2, 0.3, 0.4], # Box 0.
[0.6, 0.4, 0.1, 0.0],
[0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0, 0.0], # Box 1.
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0]],
[[0.22, 0.1, 0.6, 0.8], # Box 2.
[0.0, 0.4, 0.5, 1.0],
[0.3, 0.2, 0.4, 0.1]]],
dtype=tf.float32),
# Example 1.
tf.constant(
[[[0.5, 0.5, 0.3, 1.0], # Box 0.
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0]],
[[0.2, 0.2, 0.5, 0.8], # Box 1.
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0]]],
dtype=tf.float32),
]
gt_weights_list = [
# Example 0.
tf.constant([1.0, 1.0, 0.5], dtype=tf.float32),
# Example 1.
tf.constant([0.0, 1.0], dtype=tf.float32),
]
cn_assigner = targetassigner.CenterNetDensePoseTargetAssigner(stride=4)
batch_indices, batch_part_ids, batch_surface_coords, batch_weights = (
cn_assigner.assign_part_and_coordinate_targets(
height=120,
width=80,
gt_dp_num_points_list=gt_dp_num_points_list,
gt_dp_part_ids_list=gt_dp_part_ids_list,
gt_dp_surface_coords_list=gt_dp_surface_coords_list,
gt_weights_list=gt_weights_list))
return batch_indices, batch_part_ids, batch_surface_coords, batch_weights
batch_indices, batch_part_ids, batch_surface_coords, batch_weights = (
self.execute(graph_fn, []))
expected_batch_indices = np.array([
        # Example 0. For instance, the first set of indices is calculated as
        # follows: floor(0.11*120/4) = 3, floor(0.2*80/4) = 4.
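        # In general each index is floor(normalized_coord * extent / stride)
        # per axis, where extent is the image height (120) or width (80).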
[0, 3, 4, 1], [0, 18, 8, 6], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0],
[0, 0, 0, 0], [0, 6, 2, 0], [0, 0, 8, 2], [0, 9, 4, 3],
# Example 1.
[1, 15, 10, 7], [1, 0, 0, 0], [1, 0, 0, 0], [1, 6, 4, 0], [1, 0, 0, 0],
[1, 0, 0, 0]
], dtype=np.int32)
expected_batch_part_ids = tf.one_hot(
[1, 6, 0, 0, 0, 0, 0, 2, 3, 7, 0, 0, 0, 0, 0], depth=24).numpy()
expected_batch_surface_coords = np.array([
# Box 0.
[0.3, 0.4], [0.1, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0],
[0.6, 0.8], [0.5, 1.0], [0.4, 0.1],
# Box 1.
[0.3, 1.0], [0.0, 0.0], [0.0, 0.0], [0.5, 0.8], [0.0, 0.0], [0.0, 0.0],
], np.float32)
expected_batch_weights = np.array([
# Box 0.
1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5,
# Box 1.
0.0, 0.0, 0.0, 1.0, 0.0, 0.0
], dtype=np.float32)
self.assertAllEqual(expected_batch_indices, batch_indices)
self.assertAllEqual(expected_batch_part_ids, batch_part_ids)
self.assertAllClose(expected_batch_surface_coords, batch_surface_coords)
self.assertAllClose(expected_batch_weights, batch_weights)
class CenterNetTrackTargetAssignerTest(test_case.TestCase):
def setUp(self):
super(CenterNetTrackTargetAssignerTest, self).setUp()
self._box_center = [0.0, 0.0, 1.0, 1.0]
self._box_center_small = [0.25, 0.25, 0.75, 0.75]
self._box_lower_left = [0.5, 0.0, 1.0, 0.5]
self._box_center_offset = [0.1, 0.05, 1.0, 1.0]
self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625]
def test_assign_track_targets(self):
"""Test the assign_track_targets function."""
def graph_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_lower_left, self._box_center_small]),
tf.constant([self._box_center_small, self._box_odd_coordinates]),
]
track_id_batch = [
tf.constant([0, 1]),
tf.constant([1, 0]),
tf.constant([0, 2]),
]
assigner = targetassigner.CenterNetTrackTargetAssigner(
stride=4, num_track_ids=3)
(batch_indices, batch_weights,
track_targets) = assigner.assign_track_targets(
height=80,
width=80,
gt_track_ids_list=track_id_batch,
gt_boxes_list=box_batch)
return batch_indices, batch_weights, track_targets
indices, weights, track_ids = self.execute(graph_fn, [])
self.assertEqual(indices.shape, (3, 2, 3))
self.assertEqual(track_ids.shape, (3, 2, 3))
self.assertEqual(weights.shape, (3, 2))
np.testing.assert_array_equal(indices,
[[[0, 10, 10], [0, 15, 5]],
[[1, 15, 5], [1, 10, 10]],
[[2, 10, 10], [2, 7, 11]]])
np.testing.assert_array_equal(track_ids,
[[[1, 0, 0], [0, 1, 0]],
[[0, 1, 0], [1, 0, 0]],
[[1, 0, 0], [0, 0, 1]]])
np.testing.assert_array_equal(weights, [[1, 1], [1, 1], [1, 1]])
def test_assign_track_targets_weights(self):
"""Test the assign_track_targets function with box weights."""
def graph_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_lower_left, self._box_center_small]),
tf.constant([self._box_center_small, self._box_odd_coordinates]),
]
track_id_batch = [
tf.constant([0, 1]),
tf.constant([1, 0]),
tf.constant([0, 2]),
]
weights_batch = [
tf.constant([0.0, 1.0]),
tf.constant([1.0, 1.0]),
tf.constant([0.0, 0.0])
]
assigner = targetassigner.CenterNetTrackTargetAssigner(
stride=4, num_track_ids=3)
(batch_indices, batch_weights,
track_targets) = assigner.assign_track_targets(
height=80,
width=80,
gt_track_ids_list=track_id_batch,
gt_boxes_list=box_batch,
gt_weights_list=weights_batch)
return batch_indices, batch_weights, track_targets
indices, weights, track_ids = self.execute(graph_fn, [])
self.assertEqual(indices.shape, (3, 2, 3))
self.assertEqual(track_ids.shape, (3, 2, 3))
self.assertEqual(weights.shape, (3, 2))
np.testing.assert_array_equal(indices,
[[[0, 10, 10], [0, 15, 5]],
[[1, 15, 5], [1, 10, 10]],
[[2, 10, 10], [2, 7, 11]]])
np.testing.assert_array_equal(track_ids,
[[[1, 0, 0], [0, 1, 0]],
[[0, 1, 0], [1, 0, 0]],
[[1, 0, 0], [0, 0, 1]]])
np.testing.assert_array_equal(weights, [[0, 1], [1, 1], [0, 0]])
# TODO(xwwang): Add a test for the case when no objects are detected.
class CornerOffsetTargetAssignerTest(test_case.TestCase):
def test_filter_overlap_min_area_empty(self):
"""Test that empty masks work on CPU."""
def graph_fn(masks):
return targetassigner.filter_mask_overlap_min_area(masks)
masks = self.execute_cpu(graph_fn, [np.zeros((0, 5, 5), dtype=np.float32)])
self.assertEqual(masks.shape, (0, 5, 5))
def test_filter_overlap_min_area(self):
"""Test the object with min. area is selected instead of overlap."""
def graph_fn(masks):
return targetassigner.filter_mask_overlap_min_area(masks)
masks = np.zeros((3, 4, 4), dtype=np.float32)
masks[0, :2, :2] = 1.0
masks[1, :3, :3] = 1.0
masks[2, 3, 3] = 1.0
masks = self.execute(graph_fn, [masks])
self.assertAllClose(masks[0],
[[1, 1, 0, 0],
[1, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
self.assertAllClose(masks[1],
[[0, 0, 1, 0],
[0, 0, 1, 0],
[1, 1, 1, 0],
[0, 0, 0, 0]])
self.assertAllClose(masks[2],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
def test_assign_corner_offset_single_object(self):
"""Test that corner offsets are correct with a single object."""
assigner = targetassigner.CenterNetCornerOffsetTargetAssigner(stride=1)
def graph_fn():
boxes = [
tf.constant([[0., 0., 1., 1.]])
]
mask = np.zeros((1, 4, 4), dtype=np.float32)
mask[0, 1:3, 1:3] = 1.0
masks = [tf.constant(mask)]
return assigner.assign_corner_offset_targets(boxes, masks)
corner_offsets, foreground = self.execute(graph_fn, [])
self.assertAllClose(foreground[0],
[[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]])
self.assertAllClose(corner_offsets[0, :, :, 0],
[[0, 0, 0, 0],
[0, -1, -1, 0],
[0, -2, -2, 0],
[0, 0, 0, 0]])
self.assertAllClose(corner_offsets[0, :, :, 1],
[[0, 0, 0, 0],
[0, -1, -2, 0],
[0, -1, -2, 0],
[0, 0, 0, 0]])
self.assertAllClose(corner_offsets[0, :, :, 2],
[[0, 0, 0, 0],
[0, 3, 3, 0],
[0, 2, 2, 0],
[0, 0, 0, 0]])
self.assertAllClose(corner_offsets[0, :, :, 3],
[[0, 0, 0, 0],
[0, 3, 2, 0],
[0, 3, 2, 0],
[0, 0, 0, 0]])
def test_assign_corner_offset_multiple_objects(self):
"""Test corner offsets are correct with multiple objects."""
assigner = targetassigner.CenterNetCornerOffsetTargetAssigner(stride=1)
def graph_fn():
boxes = [
tf.constant([[0., 0., 1., 1.], [0., 0., 0., 0.]]),
tf.constant([[0., 0., .25, .25], [.25, .25, 1., 1.]])
]
mask1 = np.zeros((2, 4, 4), dtype=np.float32)
mask1[0, 0, 0] = 1.0
mask1[0, 3, 3] = 1.0
mask2 = np.zeros((2, 4, 4), dtype=np.float32)
mask2[0, :2, :2] = 1.0
mask2[1, 1:, 1:] = 1.0
masks = [tf.constant(mask1), tf.constant(mask2)]
return assigner.assign_corner_offset_targets(boxes, masks)
corner_offsets, foreground = self.execute(graph_fn, [])
self.assertEqual(corner_offsets.shape, (2, 4, 4, 4))
self.assertEqual(foreground.shape, (2, 4, 4))
self.assertAllClose(foreground[0],
[[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
self.assertAllClose(corner_offsets[0, :, :, 0],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, -3]])
self.assertAllClose(corner_offsets[0, :, :, 1],
[[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, -3]])
self.assertAllClose(corner_offsets[0, :, :, 2],
[[4, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
self.assertAllClose(corner_offsets[0, :, :, 3],
[[4, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
self.assertAllClose(foreground[1],
[[1, 1, 0, 0],
[1, 1, 1, 1],
[0, 1, 1, 1],
[0, 1, 1, 1]])
self.assertAllClose(corner_offsets[1, :, :, 0],
[[0, 0, 0, 0],
[-1, -1, 0, 0],
[0, -1, -1, -1],
[0, -2, -2, -2]])
self.assertAllClose(corner_offsets[1, :, :, 1],
[[0, -1, 0, 0],
[0, -1, -1, -2],
[0, 0, -1, -2],
[0, 0, -1, -2]])
self.assertAllClose(corner_offsets[1, :, :, 2],
[[1, 1, 0, 0],
[0, 0, 3, 3],
[0, 2, 2, 2],
[0, 1, 1, 1]])
self.assertAllClose(corner_offsets[1, :, :, 3],
[[1, 0, 0, 0],
[1, 0, 2, 1],
[0, 3, 2, 1],
[0, 3, 2, 1]])
def test_assign_corner_offsets_no_objects(self):
"""Test assignment works with empty input on cpu."""
assigner = targetassigner.CenterNetCornerOffsetTargetAssigner(stride=1)
def graph_fn():
boxes = [
tf.zeros((0, 4), dtype=tf.float32)
]
masks = [tf.zeros((0, 5, 5), dtype=tf.float32)]
return assigner.assign_corner_offset_targets(boxes, masks)
corner_offsets, foreground = self.execute_cpu(graph_fn, [])
self.assertAllClose(corner_offsets, np.zeros((1, 5, 5, 4)))
self.assertAllClose(foreground, np.zeros((1, 5, 5)))
class CenterNetTemporalOffsetTargetAssigner(test_case.TestCase):
def setUp(self):
super(CenterNetTemporalOffsetTargetAssigner, self).setUp()
self._box_center = [0.0, 0.0, 1.0, 1.0]
self._box_center_small = [0.25, 0.25, 0.75, 0.75]
self._box_lower_left = [0.5, 0.0, 1.0, 0.5]
self._box_center_offset = [0.1, 0.05, 1.0, 1.0]
self._box_odd_coordinates = [0.1625, 0.2125, 0.5625, 0.9625]
self._offset_center = [0.5, 0.4]
self._offset_center_small = [0.1, 0.1]
self._offset_lower_left = [-0.1, 0.1]
self._offset_center_offset = [0.4, 0.3]
self._offset_odd_coord = [0.125, -0.125]
def test_assign_empty_groundtruths(self):
"""Tests the assign_offset_targets function with empty inputs."""
def graph_fn():
box_batch = [
tf.zeros((0, 4), dtype=tf.float32),
]
offset_batch = [
tf.zeros((0, 2), dtype=tf.float32),
]
match_flag_batch = [
tf.zeros((0), dtype=tf.float32),
]
assigner = targetassigner.CenterNetTemporalOffsetTargetAssigner(4)
indices, temporal_offset, weights = assigner.assign_temporal_offset_targets(
80, 80, box_batch, offset_batch, match_flag_batch)
return indices, temporal_offset, weights
indices, temporal_offset, weights = self.execute(graph_fn, [])
self.assertEqual(indices.shape, (0, 3))
self.assertEqual(temporal_offset.shape, (0, 2))
self.assertEqual(weights.shape, (0,))
def test_assign_offset_targets(self):
"""Tests the assign_offset_targets function."""
def graph_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_center_offset]),
tf.constant([self._box_center_small, self._box_odd_coordinates]),
]
offset_batch = [
tf.constant([self._offset_center, self._offset_lower_left]),
tf.constant([self._offset_center_offset]),
tf.constant([self._offset_center_small, self._offset_odd_coord]),
]
match_flag_batch = [
tf.constant([1.0, 1.0]),
tf.constant([1.0]),
tf.constant([1.0, 1.0]),
]
assigner = targetassigner.CenterNetTemporalOffsetTargetAssigner(4)
indices, temporal_offset, weights = assigner.assign_temporal_offset_targets(
80, 80, box_batch, offset_batch, match_flag_batch)
return indices, temporal_offset, weights
indices, temporal_offset, weights = self.execute(graph_fn, [])
self.assertEqual(indices.shape, (5, 3))
self.assertEqual(temporal_offset.shape, (5, 2))
self.assertEqual(weights.shape, (5,))
np.testing.assert_array_equal(
indices,
[[0, 10, 10], [0, 15, 5], [1, 11, 10], [2, 10, 10], [2, 7, 11]])
np.testing.assert_array_almost_equal(
temporal_offset,
[[0.5, 0.4], [-0.1, 0.1], [0.4, 0.3], [0.1, 0.1], [0.125, -0.125]])
np.testing.assert_array_equal(weights, 1)
def test_assign_offset_targets_with_match_flags(self):
"""Tests the assign_offset_targets function with match flags."""
def graph_fn():
box_batch = [
tf.constant([self._box_center, self._box_lower_left]),
tf.constant([self._box_center_offset]),
tf.constant([self._box_center_small, self._box_odd_coordinates]),
]
offset_batch = [
tf.constant([self._offset_center, self._offset_lower_left]),
tf.constant([self._offset_center_offset]),
tf.constant([self._offset_center_small, self._offset_odd_coord]),
]
match_flag_batch = [
tf.constant([0.0, 1.0]),
tf.constant([1.0]),
tf.constant([1.0, 1.0]),
]
cn_assigner = targetassigner.CenterNetTemporalOffsetTargetAssigner(4)
weights_batch = [
tf.constant([1.0, 0.0]),
tf.constant([1.0]),
tf.constant([1.0, 1.0])
]
indices, temporal_offset, weights = cn_assigner.assign_temporal_offset_targets(
80, 80, box_batch, offset_batch, match_flag_batch, weights_batch)
return indices, temporal_offset, weights
indices, temporal_offset, weights = self.execute(graph_fn, [])
self.assertEqual(indices.shape, (5, 3))
self.assertEqual(temporal_offset.shape, (5, 2))
self.assertEqual(weights.shape, (5,))
np.testing.assert_array_equal(
indices,
[[0, 10, 10], [0, 15, 5], [1, 11, 10], [2, 10, 10], [2, 7, 11]])
np.testing.assert_array_almost_equal(
temporal_offset,
[[0.5, 0.4], [-0.1, 0.1], [0.4, 0.3], [0.1, 0.1], [0.125, -0.125]])
np.testing.assert_array_equal(weights, [0, 0, 1, 1, 1])
class DETRTargetAssignerTest(test_case.TestCase):
def test_assign_detr(self):
def graph_fn(pred_corners, groundtruth_box_corners,
groundtruth_labels, predicted_labels):
detr_target_assigner = targetassigner.DETRTargetAssigner()
pred_boxlist = box_list.BoxList(pred_corners)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = detr_target_assigner.assign(
pred_boxlist, groundtruth_boxlist,
predicted_labels, groundtruth_labels)
(cls_targets, cls_weights, reg_targets, reg_weights) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
pred_corners = np.array([[0.25, 0.25, 0.4, 0.2],
[0.5, 0.8, 1.0, 0.8],
[0.9, 0.5, 0.1, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9]],
dtype=np.float32)
predicted_labels = np.array([[-3.0, 3.0], [2.0, 9.4], [5.0, 1.0]],
dtype=np.float32)
groundtruth_labels = np.array([[0.0, 1.0], [0.0, 1.0]],
dtype=np.float32)
exp_cls_targets = [[0, 1], [0, 1], [1, 0]]
exp_cls_weights = [[1, 1], [1, 1], [1, 1]]
exp_reg_targets = [[0.25, 0.25, 0.5, 0.5],
[0.7, 0.7, 0.4, 0.4],
[0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute_cpu(
graph_fn, [pred_corners, groundtruth_box_corners,
groundtruth_labels, predicted_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
def test_batch_assign_detr(self):
def graph_fn(pred_corners, groundtruth_box_corners,
groundtruth_labels, predicted_labels):
detr_target_assigner = targetassigner.DETRTargetAssigner()
result = detr_target_assigner.batch_assign(
pred_corners, groundtruth_box_corners,
[predicted_labels], [groundtruth_labels])
(cls_targets, cls_weights, reg_targets, reg_weights) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
pred_corners = np.array([[[0.25, 0.25, 0.4, 0.2],
[0.5, 0.8, 1.0, 0.8],
[0.9, 0.5, 0.1, 1.0]]], dtype=np.float32)
groundtruth_box_corners = np.array([[[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9]]],
dtype=np.float32)
predicted_labels = np.array([[-3.0, 3.0], [2.0, 9.4], [5.0, 1.0]],
dtype=np.float32)
groundtruth_labels = np.array([[0.0, 1.0], [0.0, 1.0]],
dtype=np.float32)
exp_cls_targets = [[[0, 1], [0, 1], [1, 0]]]
exp_cls_weights = [[[1, 1], [1, 1], [1, 1]]]
exp_reg_targets = [[[0.25, 0.25, 0.5, 0.5],
[0.7, 0.7, 0.4, 0.4],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 1, 0]]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute_cpu(
graph_fn, [pred_corners, groundtruth_box_corners,
groundtruth_labels, predicted_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEqual(cls_targets_out.dtype, np.float32)
self.assertEqual(cls_weights_out.dtype, np.float32)
self.assertEqual(reg_targets_out.dtype, np.float32)
self.assertEqual(reg_weights_out.dtype, np.float32)
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
| 44.071982
| 86
| 0.543232
|
4a191cd286bb461789bb03bfc24ca48c4cf6fa0a
| 33,425
|
py
|
Python
|
venv/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/asyncpg.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 6
|
2020-10-24T05:05:07.000Z
|
2022-03-24T09:43:05.000Z
|
venv/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/asyncpg.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
venv/lib/python3.9/site-packages/sqlalchemy/dialects/postgresql/asyncpg.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 4
|
2020-10-18T05:37:47.000Z
|
2022-03-25T12:28:06.000Z
|
# postgresql/asyncpg.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors <see AUTHORS
# file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: postgresql+asyncpg
:name: asyncpg
:dbapi: asyncpg
:connectstring: postgresql+asyncpg://user:password@host:port/dbname[?key=value&key=value...]
:url: https://magicstack.github.io/asyncpg/
The asyncpg dialect is SQLAlchemy's first Python asyncio dialect.
Using a special asyncio mediation layer, the asyncpg dialect is usable
as the backend for the :ref:`SQLAlchemy asyncio <asyncio_toplevel>`
extension package.
This dialect should normally be used only with the
:func:`_asyncio.create_async_engine` engine creation function::
from sqlalchemy.ext.asyncio import create_async_engine
engine = create_async_engine("postgresql+asyncpg://user:pass@hostname/dbname")
The dialect can also be run as a "synchronous" dialect within the
:func:`_sa.create_engine` function, which will pass "await" calls into
an ad-hoc event loop. This mode of operation is of **limited use**
and is for special testing scenarios only. The mode can be enabled by
adding the SQLAlchemy-specific flag ``async_fallback`` to the URL
in conjunction with :func:`_sa.create_engine`::
# for testing purposes only; do not use in production!
engine = create_engine("postgresql+asyncpg://user:pass@hostname/dbname?async_fallback=true")
.. versionadded:: 1.4
.. note::
By default asyncpg does not decode the ``json`` and ``jsonb`` types and
    returns them as strings. SQLAlchemy sets a default type decoder for the
    ``json`` and ``jsonb`` types using the Python builtin ``json.loads``
    function.
The json implementation used can be changed by setting the attribute
``json_deserializer`` when creating the engine with
:func:`create_engine` or :func:`create_async_engine`.
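    For example, a minimal sketch (the ``_loads`` helper below is purely
    illustrative; any callable that accepts a string may be passed as
    ``json_deserializer``)::
        import json
        from sqlalchemy.ext.asyncio import create_async_engine
        def _loads(value):
            # delegate to the stdlib decoder; substitute a faster decoder here
            return json.loads(value)
        engine = create_async_engine(
            "postgresql+asyncpg://user:pass@hostname/dbname",
            json_deserializer=_loads,
        )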
.. _asyncpg_prepared_statement_cache:
Prepared Statement Cache
--------------------------
The asyncpg SQLAlchemy dialect makes use of ``asyncpg.connection.prepare()``
for all statements. The prepared statement objects are cached after
construction which appears to grant a 10% or more performance improvement for
statement invocation. The cache is on a per-DBAPI connection basis, which
means that the primary storage for prepared statements is within DBAPI
connections pooled within the connection pool. The size of this cache
defaults to 100 statements per DBAPI connection and may be adjusted using the
``prepared_statement_cache_size`` DBAPI argument (note that while this argument
is implemented by SQLAlchemy, it is part of the DBAPI emulation portion of the
asyncpg dialect, therefore is handled as a DBAPI argument, not a dialect
argument)::
engine = create_async_engine("postgresql+asyncpg://user:pass@hostname/dbname?prepared_statement_cache_size=500")
To disable the prepared statement cache, use a value of zero::
engine = create_async_engine("postgresql+asyncpg://user:pass@hostname/dbname?prepared_statement_cache_size=0")
.. versionadded:: 1.4.0b2 Added ``prepared_statement_cache_size`` for asyncpg.
.. warning:: The ``asyncpg`` database driver necessarily uses caches for
PostgreSQL type OIDs, which become stale when custom PostgreSQL datatypes
such as ``ENUM`` objects are changed via DDL operations. Additionally,
prepared statements themselves which are optionally cached by SQLAlchemy's
driver as described above may also become "stale" when DDL has been emitted
to the PostgreSQL database which modifies the tables or other objects
involved in a particular prepared statement.
The SQLAlchemy asyncpg dialect will invalidate these caches within its local
process when statements that represent DDL are emitted on a local
connection, but this is only controllable within a single Python process /
database engine. If DDL changes are made from other database engines
and/or processes, a running application may encounter asyncpg exceptions
``InvalidCachedStatementError`` and/or ``InternalServerError("cache lookup
failed for type <oid>")`` if it refers to pooled database connections which
operated upon the previous structures. The SQLAlchemy asyncpg dialect will
recover from these error cases when the driver raises these exceptions by
clearing its internal caches as well as those of the asyncpg driver in
response to them, but cannot prevent them from being raised in the first
place if the cached prepared statement or asyncpg type caches have gone
stale, nor can it retry the statement as the PostgreSQL transaction is
invalidated when these errors occur.
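As the dialect cannot replay a statement once its transaction has been
invalidated, applications that expect concurrent DDL changes may add a retry
at their own level. A minimal, hedged sketch (the ``execute_with_one_retry``
helper below is illustrative and not part of this dialect)::
    from sqlalchemy import text
    from sqlalchemy.exc import DBAPIError
    async def execute_with_one_retry(engine, statement):
        # retry once in a brand-new transaction; after the first failure the
        # dialect has already cleared its prepared statement and type caches
        for attempt in range(2):
            try:
                async with engine.begin() as conn:
                    return await conn.execute(text(statement))
            except DBAPIError:
                if attempt:
                    raise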
""" # noqa
import collections
import decimal
import json as _py_json
import re
import time
from . import json
from .base import _DECIMAL_TYPES
from .base import _FLOAT_TYPES
from .base import _INT_TYPES
from .base import ENUM
from .base import INTERVAL
from .base import OID
from .base import PGCompiler
from .base import PGDialect
from .base import PGExecutionContext
from .base import PGIdentifierPreparer
from .base import REGCLASS
from .base import UUID
from ... import exc
from ... import pool
from ... import processors
from ... import util
from ...sql import sqltypes
from ...util.concurrency import asyncio
from ...util.concurrency import await_fallback
from ...util.concurrency import await_only
try:
from uuid import UUID as _python_UUID # noqa
except ImportError:
_python_UUID = None
class AsyncpgTime(sqltypes.Time):
def get_dbapi_type(self, dbapi):
return dbapi.TIME
class AsyncpgDate(sqltypes.Date):
def get_dbapi_type(self, dbapi):
return dbapi.DATE
class AsyncpgDateTime(sqltypes.DateTime):
def get_dbapi_type(self, dbapi):
if self.timezone:
return dbapi.TIMESTAMP_W_TZ
else:
return dbapi.TIMESTAMP
class AsyncpgBoolean(sqltypes.Boolean):
def get_dbapi_type(self, dbapi):
return dbapi.BOOLEAN
class AsyncPgInterval(INTERVAL):
def get_dbapi_type(self, dbapi):
return dbapi.INTERVAL
@classmethod
def adapt_emulated_to_native(cls, interval, **kw):
return AsyncPgInterval(precision=interval.second_precision)
class AsyncPgEnum(ENUM):
def get_dbapi_type(self, dbapi):
return dbapi.ENUM
class AsyncpgInteger(sqltypes.Integer):
def get_dbapi_type(self, dbapi):
return dbapi.INTEGER
class AsyncpgBigInteger(sqltypes.BigInteger):
def get_dbapi_type(self, dbapi):
return dbapi.BIGINTEGER
class AsyncpgJSON(json.JSON):
def get_dbapi_type(self, dbapi):
return dbapi.JSON
def result_processor(self, dialect, coltype):
return None
class AsyncpgJSONB(json.JSONB):
def get_dbapi_type(self, dbapi):
return dbapi.JSONB
def result_processor(self, dialect, coltype):
return None
class AsyncpgJSONIndexType(sqltypes.JSON.JSONIndexType):
def get_dbapi_type(self, dbapi):
raise NotImplementedError("should not be here")
class AsyncpgJSONIntIndexType(sqltypes.JSON.JSONIntIndexType):
def get_dbapi_type(self, dbapi):
return dbapi.INTEGER
class AsyncpgJSONStrIndexType(sqltypes.JSON.JSONStrIndexType):
def get_dbapi_type(self, dbapi):
return dbapi.STRING
class AsyncpgJSONPathType(json.JSONPathType):
def bind_processor(self, dialect):
def process(value):
assert isinstance(value, util.collections_abc.Sequence)
tokens = [util.text_type(elem) for elem in value]
return tokens
return process
class AsyncpgUUID(UUID):
def get_dbapi_type(self, dbapi):
return dbapi.UUID
def bind_processor(self, dialect):
if not self.as_uuid and dialect.use_native_uuid:
def process(value):
if value is not None:
value = _python_UUID(value)
return value
return process
def result_processor(self, dialect, coltype):
if not self.as_uuid and dialect.use_native_uuid:
def process(value):
if value is not None:
value = str(value)
return value
return process
class AsyncpgNumeric(sqltypes.Numeric):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(
decimal.Decimal, self._effective_decimal_return_scale
)
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# pg8000 returns Decimal natively for 1700
return None
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
else:
if coltype in _FLOAT_TYPES:
# pg8000 returns float natively for 701
return None
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
return processors.to_float
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype
)
class AsyncpgREGCLASS(REGCLASS):
def get_dbapi_type(self, dbapi):
return dbapi.STRING
class AsyncpgOID(OID):
def get_dbapi_type(self, dbapi):
return dbapi.INTEGER
class PGExecutionContext_asyncpg(PGExecutionContext):
def handle_dbapi_exception(self, e):
if isinstance(
e,
(
self.dialect.dbapi.InvalidCachedStatementError,
self.dialect.dbapi.InternalServerError,
),
):
self.dialect._invalidate_schema_cache()
def pre_exec(self):
if self.isddl:
self.dialect._invalidate_schema_cache()
self.cursor._invalidate_schema_cache_asof = (
self.dialect._invalidate_schema_cache_asof
)
if not self.compiled:
return
        # we have to exclude ENUM because "enum" is not really a "type" we can
        # cast to; it has to be the name of the type itself. For now we just
        # omit it from casting.
self.exclude_set_input_sizes = {AsyncAdapt_asyncpg_dbapi.ENUM}
def create_server_side_cursor(self):
return self._dbapi_connection.cursor(server_side=True)
class PGCompiler_asyncpg(PGCompiler):
pass
class PGIdentifierPreparer_asyncpg(PGIdentifierPreparer):
pass
class AsyncAdapt_asyncpg_cursor:
__slots__ = (
"_adapt_connection",
"_connection",
"_rows",
"description",
"arraysize",
"rowcount",
"_inputsizes",
"_cursor",
"_invalidate_schema_cache_asof",
)
server_side = False
def __init__(self, adapt_connection):
self._adapt_connection = adapt_connection
self._connection = adapt_connection._connection
self._rows = []
self._cursor = None
self.description = None
self.arraysize = 1
self.rowcount = -1
self._inputsizes = None
self._invalidate_schema_cache_asof = 0
def close(self):
self._rows[:] = []
def _handle_exception(self, error):
self._adapt_connection._handle_exception(error)
def _parameter_placeholders(self, params):
if not self._inputsizes:
return tuple("$%d" % idx for idx, _ in enumerate(params, 1))
else:
return tuple(
"$%d::%s" % (idx, typ) if typ else "$%d" % idx
for idx, typ in enumerate(
(_pg_types.get(typ) for typ in self._inputsizes), 1
)
)
async def _prepare_and_execute(self, operation, parameters):
adapt_connection = self._adapt_connection
async with adapt_connection._execute_mutex:
if not adapt_connection._started:
await adapt_connection._start_transaction()
if parameters is not None:
operation = operation % self._parameter_placeholders(
parameters
)
else:
parameters = ()
try:
prepared_stmt, attributes = await adapt_connection._prepare(
operation, self._invalidate_schema_cache_asof
)
if attributes:
self.description = [
(
attr.name,
attr.type.oid,
None,
None,
None,
None,
None,
)
for attr in attributes
]
else:
self.description = None
if self.server_side:
self._cursor = await prepared_stmt.cursor(*parameters)
self.rowcount = -1
else:
self._rows = await prepared_stmt.fetch(*parameters)
status = prepared_stmt.get_statusmsg()
reg = re.match(
r"(?:UPDATE|DELETE|INSERT \d+) (\d+)", status
)
if reg:
self.rowcount = int(reg.group(1))
else:
self.rowcount = -1
except Exception as error:
self._handle_exception(error)
async def _executemany(self, operation, seq_of_parameters):
adapt_connection = self._adapt_connection
async with adapt_connection._execute_mutex:
await adapt_connection._check_type_cache_invalidation(
self._invalidate_schema_cache_asof
)
if not adapt_connection._started:
await adapt_connection._start_transaction()
operation = operation % self._parameter_placeholders(
seq_of_parameters[0]
)
try:
return await self._connection.executemany(
operation, seq_of_parameters
)
except Exception as error:
self._handle_exception(error)
def execute(self, operation, parameters=None):
self._adapt_connection.await_(
self._prepare_and_execute(operation, parameters)
)
def executemany(self, operation, seq_of_parameters):
return self._adapt_connection.await_(
self._executemany(operation, seq_of_parameters)
)
def setinputsizes(self, *inputsizes):
self._inputsizes = inputsizes
def __iter__(self):
while self._rows:
yield self._rows.pop(0)
def fetchone(self):
if self._rows:
return self._rows.pop(0)
else:
return None
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
retval = self._rows[0:size]
self._rows[:] = self._rows[size:]
return retval
def fetchall(self):
retval = self._rows[:]
self._rows[:] = []
return retval
class AsyncAdapt_asyncpg_ss_cursor(AsyncAdapt_asyncpg_cursor):
server_side = True
__slots__ = ("_rowbuffer",)
def __init__(self, adapt_connection):
super(AsyncAdapt_asyncpg_ss_cursor, self).__init__(adapt_connection)
self._rowbuffer = None
def close(self):
self._cursor = None
self._rowbuffer = None
def _buffer_rows(self):
new_rows = self._adapt_connection.await_(self._cursor.fetch(50))
self._rowbuffer = collections.deque(new_rows)
def __aiter__(self):
return self
async def __anext__(self):
if not self._rowbuffer:
self._buffer_rows()
while True:
while self._rowbuffer:
yield self._rowbuffer.popleft()
self._buffer_rows()
if not self._rowbuffer:
break
def fetchone(self):
if not self._rowbuffer:
self._buffer_rows()
if not self._rowbuffer:
return None
return self._rowbuffer.popleft()
def fetchmany(self, size=None):
if size is None:
return self.fetchall()
if not self._rowbuffer:
self._buffer_rows()
buf = list(self._rowbuffer)
lb = len(buf)
if size > lb:
buf.extend(
self._adapt_connection.await_(self._cursor.fetch(size - lb))
)
result = buf[0:size]
self._rowbuffer = collections.deque(buf[size:])
return result
def fetchall(self):
ret = list(self._rowbuffer) + list(
self._adapt_connection.await_(self._all())
)
self._rowbuffer.clear()
return ret
async def _all(self):
rows = []
# TODO: looks like we have to hand-roll some kind of batching here.
# hardcoding for the moment but this should be improved.
while True:
batch = await self._cursor.fetch(1000)
if batch:
rows.extend(batch)
continue
else:
break
return rows
def executemany(self, operation, seq_of_parameters):
raise NotImplementedError(
"server side cursor doesn't support executemany yet"
)
class AsyncAdapt_asyncpg_connection:
__slots__ = (
"dbapi",
"_connection",
"isolation_level",
"_isolation_setting",
"readonly",
"deferrable",
"_transaction",
"_started",
"_prepared_statement_cache",
"_invalidate_schema_cache_asof",
"_execute_mutex",
)
await_ = staticmethod(await_only)
def __init__(self, dbapi, connection, prepared_statement_cache_size=100):
self.dbapi = dbapi
self._connection = connection
self.isolation_level = self._isolation_setting = "read_committed"
self.readonly = False
self.deferrable = False
self._transaction = None
self._started = False
self._invalidate_schema_cache_asof = time.time()
self._execute_mutex = asyncio.Lock()
if prepared_statement_cache_size:
self._prepared_statement_cache = util.LRUCache(
prepared_statement_cache_size
)
else:
self._prepared_statement_cache = None
async def _check_type_cache_invalidation(self, invalidate_timestamp):
if invalidate_timestamp > self._invalidate_schema_cache_asof:
await self._connection.reload_schema_state()
self._invalidate_schema_cache_asof = invalidate_timestamp
async def _prepare(self, operation, invalidate_timestamp):
await self._check_type_cache_invalidation(invalidate_timestamp)
cache = self._prepared_statement_cache
if cache is None:
prepared_stmt = await self._connection.prepare(operation)
attributes = prepared_stmt.get_attributes()
return prepared_stmt, attributes
# asyncpg uses a type cache for the "attributes" which seems to go
# stale independently of the PreparedStatement itself, so place that
# collection in the cache as well.
if operation in cache:
prepared_stmt, attributes, cached_timestamp = cache[operation]
            # prepared statements themselves also go stale for certain DDL
# changes such as size of a VARCHAR changing, so there is also
# a cross-connection invalidation timestamp
if cached_timestamp > invalidate_timestamp:
return prepared_stmt, attributes
prepared_stmt = await self._connection.prepare(operation)
attributes = prepared_stmt.get_attributes()
cache[operation] = (prepared_stmt, attributes, time.time())
return prepared_stmt, attributes
def _handle_exception(self, error):
if self._connection.is_closed():
self._transaction = None
self._started = False
if not isinstance(error, AsyncAdapt_asyncpg_dbapi.Error):
exception_mapping = self.dbapi._asyncpg_error_translate
for super_ in type(error).__mro__:
if super_ in exception_mapping:
translated_error = exception_mapping[super_](
"%s: %s" % (type(error), error)
)
translated_error.pgcode = (
translated_error.sqlstate
) = getattr(error, "sqlstate", None)
raise translated_error from error
else:
raise error
else:
raise error
@property
def autocommit(self):
return self.isolation_level == "autocommit"
@autocommit.setter
def autocommit(self, value):
if value:
self.isolation_level = "autocommit"
else:
self.isolation_level = self._isolation_setting
def set_isolation_level(self, level):
if self._started:
self.rollback()
self.isolation_level = self._isolation_setting = level
async def _start_transaction(self):
if self.isolation_level == "autocommit":
return
try:
self._transaction = self._connection.transaction(
isolation=self.isolation_level,
readonly=self.readonly,
deferrable=self.deferrable,
)
await self._transaction.start()
except Exception as error:
self._handle_exception(error)
else:
self._started = True
def cursor(self, server_side=False):
if server_side:
return AsyncAdapt_asyncpg_ss_cursor(self)
else:
return AsyncAdapt_asyncpg_cursor(self)
def rollback(self):
if self._started:
try:
self.await_(self._transaction.rollback())
except Exception as error:
self._handle_exception(error)
finally:
self._transaction = None
self._started = False
def commit(self):
if self._started:
try:
self.await_(self._transaction.commit())
except Exception as error:
self._handle_exception(error)
finally:
self._transaction = None
self._started = False
def close(self):
self.rollback()
self.await_(self._connection.close())
class AsyncAdaptFallback_asyncpg_connection(AsyncAdapt_asyncpg_connection):
__slots__ = ()
await_ = staticmethod(await_fallback)
class AsyncAdapt_asyncpg_dbapi:
def __init__(self, asyncpg):
self.asyncpg = asyncpg
self.paramstyle = "format"
def connect(self, *arg, **kw):
async_fallback = kw.pop("async_fallback", False)
prepared_statement_cache_size = kw.pop(
"prepared_statement_cache_size", 100
)
if util.asbool(async_fallback):
return AsyncAdaptFallback_asyncpg_connection(
self,
await_fallback(self.asyncpg.connect(*arg, **kw)),
prepared_statement_cache_size=prepared_statement_cache_size,
)
else:
return AsyncAdapt_asyncpg_connection(
self,
await_only(self.asyncpg.connect(*arg, **kw)),
prepared_statement_cache_size=prepared_statement_cache_size,
)
class Error(Exception):
pass
class Warning(Exception): # noqa
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class InternalError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class DataError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class InternalServerError(InternalError):
pass
class InvalidCachedStatementError(NotSupportedError):
def __init__(self, message):
super(
AsyncAdapt_asyncpg_dbapi.InvalidCachedStatementError, self
).__init__(
message + " (SQLAlchemy asyncpg dialect will now invalidate "
"all prepared caches in response to this exception)",
)
@util.memoized_property
def _asyncpg_error_translate(self):
import asyncpg
return {
asyncpg.exceptions.IntegrityConstraintViolationError: self.IntegrityError, # noqa: E501
asyncpg.exceptions.PostgresError: self.Error,
asyncpg.exceptions.SyntaxOrAccessError: self.ProgrammingError,
asyncpg.exceptions.InterfaceError: self.InterfaceError,
asyncpg.exceptions.InvalidCachedStatementError: self.InvalidCachedStatementError, # noqa: E501
asyncpg.exceptions.InternalServerError: self.InternalServerError,
}
def Binary(self, value):
return value
STRING = util.symbol("STRING")
TIMESTAMP = util.symbol("TIMESTAMP")
TIMESTAMP_W_TZ = util.symbol("TIMESTAMP_W_TZ")
TIME = util.symbol("TIME")
DATE = util.symbol("DATE")
INTERVAL = util.symbol("INTERVAL")
NUMBER = util.symbol("NUMBER")
FLOAT = util.symbol("FLOAT")
BOOLEAN = util.symbol("BOOLEAN")
INTEGER = util.symbol("INTEGER")
BIGINTEGER = util.symbol("BIGINTEGER")
BYTES = util.symbol("BYTES")
DECIMAL = util.symbol("DECIMAL")
JSON = util.symbol("JSON")
JSONB = util.symbol("JSONB")
ENUM = util.symbol("ENUM")
UUID = util.symbol("UUID")
BYTEA = util.symbol("BYTEA")
DATETIME = TIMESTAMP
BINARY = BYTEA
_pg_types = {
AsyncAdapt_asyncpg_dbapi.STRING: "varchar",
AsyncAdapt_asyncpg_dbapi.TIMESTAMP: "timestamp",
AsyncAdapt_asyncpg_dbapi.TIMESTAMP_W_TZ: "timestamp with time zone",
AsyncAdapt_asyncpg_dbapi.DATE: "date",
AsyncAdapt_asyncpg_dbapi.TIME: "time",
AsyncAdapt_asyncpg_dbapi.INTERVAL: "interval",
AsyncAdapt_asyncpg_dbapi.NUMBER: "numeric",
AsyncAdapt_asyncpg_dbapi.FLOAT: "float",
AsyncAdapt_asyncpg_dbapi.BOOLEAN: "bool",
AsyncAdapt_asyncpg_dbapi.INTEGER: "integer",
AsyncAdapt_asyncpg_dbapi.BIGINTEGER: "bigint",
AsyncAdapt_asyncpg_dbapi.BYTES: "bytes",
AsyncAdapt_asyncpg_dbapi.DECIMAL: "decimal",
AsyncAdapt_asyncpg_dbapi.JSON: "json",
AsyncAdapt_asyncpg_dbapi.JSONB: "jsonb",
AsyncAdapt_asyncpg_dbapi.ENUM: "enum",
AsyncAdapt_asyncpg_dbapi.UUID: "uuid",
AsyncAdapt_asyncpg_dbapi.BYTEA: "bytea",
}
class PGDialect_asyncpg(PGDialect):
driver = "asyncpg"
supports_statement_cache = True
supports_unicode_statements = True
supports_server_side_cursors = True
supports_unicode_binds = True
default_paramstyle = "format"
supports_sane_multi_rowcount = False
execution_ctx_cls = PGExecutionContext_asyncpg
statement_compiler = PGCompiler_asyncpg
preparer = PGIdentifierPreparer_asyncpg
use_setinputsizes = True
use_native_uuid = True
colspecs = util.update_copy(
PGDialect.colspecs,
{
sqltypes.Time: AsyncpgTime,
sqltypes.Date: AsyncpgDate,
sqltypes.DateTime: AsyncpgDateTime,
sqltypes.Interval: AsyncPgInterval,
INTERVAL: AsyncPgInterval,
UUID: AsyncpgUUID,
sqltypes.Boolean: AsyncpgBoolean,
sqltypes.Integer: AsyncpgInteger,
sqltypes.BigInteger: AsyncpgBigInteger,
sqltypes.Numeric: AsyncpgNumeric,
sqltypes.JSON: AsyncpgJSON,
json.JSONB: AsyncpgJSONB,
sqltypes.JSON.JSONPathType: AsyncpgJSONPathType,
sqltypes.JSON.JSONIndexType: AsyncpgJSONIndexType,
sqltypes.JSON.JSONIntIndexType: AsyncpgJSONIntIndexType,
sqltypes.JSON.JSONStrIndexType: AsyncpgJSONStrIndexType,
sqltypes.Enum: AsyncPgEnum,
OID: AsyncpgOID,
REGCLASS: AsyncpgREGCLASS,
},
)
is_async = True
_invalidate_schema_cache_asof = 0
def _invalidate_schema_cache(self):
self._invalidate_schema_cache_asof = time.time()
@util.memoized_property
def _dbapi_version(self):
if self.dbapi and hasattr(self.dbapi, "__version__"):
return tuple(
[
int(x)
for x in re.findall(
r"(\d+)(?:[-\.]?|$)", self.dbapi.__version__
)
]
)
else:
return (99, 99, 99)
@classmethod
def dbapi(cls):
return AsyncAdapt_asyncpg_dbapi(__import__("asyncpg"))
@util.memoized_property
def _isolation_lookup(self):
return {
"AUTOCOMMIT": "autocommit",
"READ COMMITTED": "read_committed",
"REPEATABLE READ": "repeatable_read",
"SERIALIZABLE": "serializable",
}
def set_isolation_level(self, connection, level):
try:
level = self._isolation_lookup[level.replace("_", " ")]
except KeyError as err:
util.raise_(
exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (level, self.name, ", ".join(self._isolation_lookup))
),
replace_context=err,
)
connection.set_isolation_level(level)
def set_readonly(self, connection, value):
connection.readonly = value
def get_readonly(self, connection):
return connection.readonly
def set_deferrable(self, connection, value):
connection.deferrable = value
def get_deferrable(self, connection):
return connection.deferrable
def create_connect_args(self, url):
opts = url.translate_connect_args(username="user")
opts.update(url.query)
util.coerce_kw_type(opts, "prepared_statement_cache_size", int)
util.coerce_kw_type(opts, "port", int)
return ([], opts)
@classmethod
def get_pool_class(cls, url):
async_fallback = url.query.get("async_fallback", False)
if util.asbool(async_fallback):
return pool.FallbackAsyncAdaptedQueuePool
else:
return pool.AsyncAdaptedQueuePool
def is_disconnect(self, e, connection, cursor):
if connection:
return connection._connection.is_closed()
else:
return isinstance(
e, self.dbapi.InterfaceError
) and "connection is closed" in str(e)
def do_set_input_sizes(self, cursor, list_of_tuples, context):
if self.positional:
cursor.setinputsizes(
*[dbtype for key, dbtype, sqltype in list_of_tuples]
)
else:
cursor.setinputsizes(
**{
key: dbtype
for key, dbtype, sqltype in list_of_tuples
if dbtype
}
)
def on_connect(self):
super_connect = super(PGDialect_asyncpg, self).on_connect()
def _jsonb_encoder(str_value):
# \x01 is the prefix for jsonb used by PostgreSQL.
# asyncpg requires it when format='binary'
return b"\x01" + str_value.encode()
deserializer = self._json_deserializer or _py_json.loads
def _json_decoder(bin_value):
return deserializer(bin_value.decode())
def _jsonb_decoder(bin_value):
# the byte is the \x01 prefix for jsonb used by PostgreSQL.
# asyncpg returns it when format='binary'
return deserializer(bin_value[1:].decode())
async def _setup_type_codecs(conn):
"""set up type decoders at the asyncpg level.
these are set_type_codec() calls to normalize
There was a tentative decoder for the "char" datatype here
to have it return strings however this type is actually a binary
type that other drivers are likely mis-interpreting.
See https://github.com/MagicStack/asyncpg/issues/623 for reference
on why it's set up this way.
"""
await conn._connection.set_type_codec(
"json",
encoder=str.encode,
decoder=_json_decoder,
schema="pg_catalog",
format="binary",
)
await conn._connection.set_type_codec(
"jsonb",
encoder=_jsonb_encoder,
decoder=_jsonb_decoder,
schema="pg_catalog",
format="binary",
)
def connect(conn):
conn.await_(_setup_type_codecs(conn))
if super_connect is not None:
super_connect(conn)
return connect
dialect = PGDialect_asyncpg
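# --- Illustrative usage sketch ---
# A minimal sketch of how an application typically reaches this dialect,
# assuming SQLAlchemy 1.4+ with the asyncio extension and the asyncpg driver
# installed. The DSN and credentials are placeholders; the
# prepared_statement_cache_size query parameter is the one consumed by
# create_connect_args() / AsyncAdapt_asyncpg_dbapi.connect() above.
import asyncio
from sqlalchemy import text
from sqlalchemy.ext.asyncio import create_async_engine
async def _example_asyncpg_query():
    engine = create_async_engine(
        "postgresql+asyncpg://scott:tiger@localhost/testdb"
        "?prepared_statement_cache_size=500"
    )
    async with engine.connect() as conn:
        result = await conn.execute(text("SELECT 1"))
        print(result.scalar())
    await engine.dispose()
# asyncio.run(_example_asyncpg_query())  # uncomment to run against a live database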
| 31.833333
| 116
| 0.630217
|
4a191cd3a75baa6521c06c7831088cce735c0bff
| 1,727
|
py
|
Python
|
grr/test_lib/stats_test_lib.py
|
dekoder/grr
|
27ba38dc0f5ad4f3e0cdbfb146a0a789e3b0d27b
|
[
"Apache-2.0"
] | 3
|
2018-09-30T01:31:29.000Z
|
2019-04-22T11:44:54.000Z
|
grr/test_lib/stats_test_lib.py
|
tomchop/grr
|
27ba38dc0f5ad4f3e0cdbfb146a0a789e3b0d27b
|
[
"Apache-2.0"
] | 1
|
2022-03-02T09:58:05.000Z
|
2022-03-02T09:58:05.000Z
|
grr/test_lib/stats_test_lib.py
|
tomchop/grr
|
27ba38dc0f5ad4f3e0cdbfb146a0a789e3b0d27b
|
[
"Apache-2.0"
] | 1
|
2018-08-30T14:50:24.000Z
|
2018-08-30T14:50:24.000Z
|
#!/usr/bin/env python
"""Classes for stats-related testing."""
import mock
from grr_response_core.lib import stats
class StatsDeltaAssertionContext(object):
"""A context manager to check the stats variable changes."""
def __init__(self, test, delta, varname, fields=None):
self.test = test
self.varname = varname
self.fields = fields
self.delta = delta
def __enter__(self):
self.prev_count = stats.STATS.GetMetricValue(
self.varname, fields=self.fields)
# Handle the case when we're dealing with distributions.
if hasattr(self.prev_count, "count"):
self.prev_count = self.prev_count.count
def __exit__(self, unused_type, unused_value, unused_traceback):
new_count = stats.STATS.GetMetricValue(
varname=self.varname, fields=self.fields)
if hasattr(new_count, "count"):
new_count = new_count.count
self.test.assertEqual(
new_count - self.prev_count, self.delta,
"%s (fields=%s) expected to change with delta=%d" %
(self.varname, self.fields, self.delta))
class StatsTestMixin(object):
"""Mixing for stats-related assertions."""
def setUp(self): # pylint: disable=invalid-name
super(StatsTestMixin, self).setUp()
self._stats_patcher = mock.patch.object(stats, "STATS",
stats.StatsCollector())
self._stats_patcher.start()
def tearDown(self): # pylint: disable=invalid-name
self._stats_patcher.stop()
super(StatsTestMixin, self).tearDown()
# pylint: disable=invalid-name
def assertStatsCounterDelta(self, delta, varname, fields=None):
return StatsDeltaAssertionContext(self, delta, varname, fields=fields)
# pylint: enable=invalid-name
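# A minimal sketch of how a test might use the mixin above; the metric name and
# the RegisterCounterMetric/IncrementCounter calls are assumptions about the
# legacy grr_response_core stats API and may need adjusting.
import unittest
class _ExampleStatsTest(StatsTestMixin, unittest.TestCase):
  def testCounterIsIncrementedOnce(self):
    stats.STATS.RegisterCounterMetric("example_counter")
    with self.assertStatsCounterDelta(1, "example_counter"):
      stats.STATS.IncrementCounter("example_counter")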
| 31.4
| 74
| 0.693109
|
4a191cf374f3a4314a764ad3bcb06ed3218a2a9e
| 1,866
|
py
|
Python
|
tests/fixtures.py
|
dtpryce/MLServer
|
02744b3c770141b0b1d9dad2a0256d243051de61
|
[
"Apache-2.0"
] | null | null | null |
tests/fixtures.py
|
dtpryce/MLServer
|
02744b3c770141b0b1d9dad2a0256d243051de61
|
[
"Apache-2.0"
] | null | null | null |
tests/fixtures.py
|
dtpryce/MLServer
|
02744b3c770141b0b1d9dad2a0256d243051de61
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
from mlserver import MLModel
from mlserver.types import InferenceRequest, InferenceResponse, Parameters
from mlserver.codecs import NumpyCodec
from mlserver.handlers.custom import custom_handler
from mlserver.errors import MLServerError
class SumModel(MLModel):
@custom_handler(rest_path="/my-custom-endpoint")
async def my_payload(self, payload: list) -> int:
return sum(payload)
async def predict(self, payload: InferenceRequest) -> InferenceResponse:
decoded = self.decode(payload.inputs[0])
total = decoded.sum(axis=1, keepdims=True)
output = NumpyCodec.encode(name="total", payload=total)
response = InferenceResponse(
id=payload.id,
model_name=self.name,
model_version=self.version,
outputs=[output],
)
if payload.parameters and payload.parameters.headers:
# "Echo" headers back prefixed by `x-`
request_headers = payload.parameters.headers
response_headers = {}
for header_name, header_value in request_headers.items():
if header_name.startswith("x-"):
response_headers[header_name] = header_value
response.parameters = Parameters(headers=response_headers)
return response
class ErrorModel(MLModel):
error_message = "something really bad happened"
async def predict(self, payload: InferenceRequest) -> InferenceResponse:
raise MLServerError(self.error_message)
class SlowModel(MLModel):
async def load(self) -> bool:
await asyncio.sleep(10)
self.ready = True
return self.ready
async def infer(self, payload: InferenceRequest) -> InferenceResponse:
await asyncio.sleep(10)
return InferenceResponse(id=payload.id, model_name=self.name, outputs=[])
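# A minimal sketch of the request shape SumModel.predict expects; the input
# name, shape and datatype are illustrative assumptions, and RequestInput is
# assumed to be importable from mlserver.types.
from mlserver.types import RequestInput
example_sum_request = InferenceRequest(
    inputs=[
        RequestInput(name="input-0", shape=[1, 3], datatype="INT32", data=[1, 2, 3])
    ]
)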
| 33.321429
| 81
| 0.680064
|
4a191dc6c97b69b485766a9c1a70680f676e7182
| 20,914
|
py
|
Python
|
main.py
|
joehalliwell/taming-transformers
|
8f92908403cc25a6edeab1eee0eb89950c08e4a6
|
[
"MIT"
] | null | null | null |
main.py
|
joehalliwell/taming-transformers
|
8f92908403cc25a6edeab1eee0eb89950c08e4a6
|
[
"MIT"
] | null | null | null |
main.py
|
joehalliwell/taming-transformers
|
8f92908403cc25a6edeab1eee0eb89950c08e4a6
|
[
"MIT"
] | null | null | null |
import argparse, os, sys, datetime, glob, importlib
from omegaconf import OmegaConf
import numpy as np
from PIL import Image
import torch
import torchvision
from torch.utils.data import random_split, DataLoader, Dataset
import pytorch_lightning as pl
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from taming import get_obj_from_str, instantiate_from_config
def get_parser(**parser_kwargs):
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
parser = argparse.ArgumentParser(**parser_kwargs)
parser.add_argument(
"-n",
"--name",
type=str,
const=True,
default="",
nargs="?",
help="postfix for logdir",
)
parser.add_argument(
"-r",
"--resume",
type=str,
const=True,
default="",
nargs="?",
help="resume from logdir or checkpoint in logdir",
)
parser.add_argument(
"-b",
"--base",
nargs="*",
metavar="base_config.yaml",
help="paths to base configs. Loaded from left-to-right. "
"Parameters can be overwritten or added with command-line options of the form `--key value`.",
default=list(),
)
parser.add_argument(
"-t",
"--train",
type=str2bool,
const=True,
default=False,
nargs="?",
help="train",
)
parser.add_argument(
"--no-test",
type=str2bool,
const=True,
default=False,
nargs="?",
help="disable test",
)
parser.add_argument("-p", "--project", help="name of new or path to existing project")
parser.add_argument(
"-d",
"--debug",
type=str2bool,
nargs="?",
const=True,
default=False,
help="enable post-mortem debugging",
)
parser.add_argument(
"-s",
"--seed",
type=int,
default=23,
help="seed for seed_everything",
)
parser.add_argument(
"-f",
"--postfix",
type=str,
default="",
help="post-postfix for default name",
)
return parser
def nondefault_trainer_args(opt):
parser = argparse.ArgumentParser()
parser = Trainer.add_argparse_args(parser)
args = parser.parse_args([])
return sorted(k for k in vars(args) if getattr(opt, k) != getattr(args, k))
def instantiate_from_config(config):
if not "target" in config:
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"])(**config.get("params", dict()))
class WrappedDataset(Dataset):
"""Wraps an arbitrary object with __len__ and __getitem__ into a pytorch dataset"""
def __init__(self, dataset):
self.data = dataset
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
class DataModuleFromConfig(pl.LightningDataModule):
def __init__(self, batch_size, train=None, validation=None, test=None,
wrap=False, num_workers=None):
super().__init__()
self.batch_size = batch_size
self.dataset_configs = dict()
self.num_workers = num_workers if num_workers is not None else batch_size*2
if train is not None:
self.dataset_configs["train"] = train
self.train_dataloader = self._train_dataloader
if validation is not None:
self.dataset_configs["validation"] = validation
self.val_dataloader = self._val_dataloader
if test is not None:
self.dataset_configs["test"] = test
self.test_dataloader = self._test_dataloader
self.wrap = wrap
def prepare_data(self):
for data_cfg in self.dataset_configs.values():
instantiate_from_config(data_cfg)
def setup(self, stage=None):
self.datasets = dict(
(k, instantiate_from_config(self.dataset_configs[k]))
for k in self.dataset_configs)
if self.wrap:
for k in self.datasets:
self.datasets[k] = WrappedDataset(self.datasets[k])
def _train_dataloader(self):
return DataLoader(self.datasets["train"], batch_size=self.batch_size,
num_workers=self.num_workers, shuffle=True)
def _val_dataloader(self):
return DataLoader(self.datasets["validation"],
batch_size=self.batch_size,
num_workers=self.num_workers)
def _test_dataloader(self):
return DataLoader(self.datasets["test"], batch_size=self.batch_size,
num_workers=self.num_workers)
class SetupCallback(Callback):
def __init__(self, resume, now, logdir, ckptdir, cfgdir, config, lightning_config):
super().__init__()
self.resume = resume
self.now = now
self.logdir = logdir
self.ckptdir = ckptdir
self.cfgdir = cfgdir
self.config = config
self.lightning_config = lightning_config
def on_pretrain_routine_start(self, trainer, pl_module):
if trainer.global_rank == 0:
# Create logdirs and save configs
os.makedirs(self.logdir, exist_ok=True)
os.makedirs(self.ckptdir, exist_ok=True)
os.makedirs(self.cfgdir, exist_ok=True)
print("Project config")
print(self.config.pretty())
OmegaConf.save(self.config,
os.path.join(self.cfgdir, "{}-project.yaml".format(self.now)))
print("Lightning config")
print(self.lightning_config.pretty())
OmegaConf.save(OmegaConf.create({"lightning": self.lightning_config}),
os.path.join(self.cfgdir, "{}-lightning.yaml".format(self.now)))
else:
# ModelCheckpoint callback created log directory --- remove it
if not self.resume and os.path.exists(self.logdir):
dst, name = os.path.split(self.logdir)
dst = os.path.join(dst, "child_runs", name)
os.makedirs(os.path.split(dst)[0], exist_ok=True)
try:
os.rename(self.logdir, dst)
except FileNotFoundError:
pass
class ImageLogger(Callback):
def __init__(self, batch_frequency, max_images, clamp=True, increase_log_steps=True):
super().__init__()
self.batch_freq = batch_frequency
self.max_images = max_images
self.logger_log_images = {
pl.loggers.WandbLogger: self._wandb,
pl.loggers.TestTubeLogger: self._testtube,
}
self.log_steps = [2 ** n for n in range(int(np.log2(self.batch_freq)) + 1)]
if not increase_log_steps:
self.log_steps = [self.batch_freq]
self.clamp = clamp
@rank_zero_only
def _wandb(self, pl_module, images, batch_idx, split):
raise ValueError("No way wandb")
grids = dict()
for k in images:
grid = torchvision.utils.make_grid(images[k])
grids[f"{split}/{k}"] = wandb.Image(grid)
pl_module.logger.experiment.log(grids)
@rank_zero_only
def _testtube(self, pl_module, images, batch_idx, split):
for k in images:
grid = torchvision.utils.make_grid(images[k])
grid = (grid+1.0)/2.0 # -1,1 -> 0,1; c,h,w
tag = f"{split}/{k}"
pl_module.logger.experiment.add_image(
tag, grid,
global_step=pl_module.global_step)
@rank_zero_only
def log_local(self, save_dir, split, images,
global_step, current_epoch, batch_idx):
root = os.path.join(save_dir, "images", split)
for k in images:
grid = torchvision.utils.make_grid(images[k], nrow=4)
grid = (grid+1.0)/2.0 # -1,1 -> 0,1; c,h,w
grid = grid.transpose(0,1).transpose(1,2).squeeze(-1)
grid = grid.numpy()
grid = (grid*255).astype(np.uint8)
filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(
k,
global_step,
current_epoch,
batch_idx)
path = os.path.join(root, filename)
os.makedirs(os.path.split(path)[0], exist_ok=True)
Image.fromarray(grid).save(path)
def log_img(self, pl_module, batch, batch_idx, split="train"):
if (self.check_frequency(batch_idx) and # batch_idx % self.batch_freq == 0
hasattr(pl_module, "log_images") and
callable(pl_module.log_images) and
self.max_images > 0):
logger = type(pl_module.logger)
is_train = pl_module.training
if is_train:
pl_module.eval()
with torch.no_grad():
images = pl_module.log_images(batch, split=split)
for k in images:
N = min(images[k].shape[0], self.max_images)
images[k] = images[k][:N]
if isinstance(images[k], torch.Tensor):
images[k] = images[k].detach().cpu()
if self.clamp:
images[k] = torch.clamp(images[k], -1., 1.)
self.log_local(pl_module.logger.save_dir, split, images,
pl_module.global_step, pl_module.current_epoch, batch_idx)
logger_log_images = self.logger_log_images.get(logger, lambda *args, **kwargs: None)
logger_log_images(pl_module, images, pl_module.global_step, split)
if is_train:
pl_module.train()
def check_frequency(self, batch_idx):
if (batch_idx % self.batch_freq) == 0 or (batch_idx in self.log_steps):
try:
self.log_steps.pop(0)
except IndexError:
pass
return True
return False
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
self.log_img(pl_module, batch, batch_idx, split="train")
def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
self.log_img(pl_module, batch, batch_idx, split="val")
if __name__ == "__main__":
# custom parser to specify config files, train, test and debug mode,
# postfix, resume.
# `--key value` arguments are interpreted as arguments to the trainer.
# `nested.key=value` arguments are interpreted as config parameters.
# configs are merged from left-to-right followed by command line parameters.
# model:
# base_learning_rate: float
# target: path to lightning module
# params:
# key: value
# data:
# target: main.DataModuleFromConfig
# params:
# batch_size: int
# wrap: bool
# train:
# target: path to train dataset
# params:
# key: value
# validation:
# target: path to validation dataset
# params:
# key: value
# test:
# target: path to test dataset
# params:
# key: value
# lightning: (optional, has sane defaults and can be specified on cmdline)
# trainer:
# additional arguments to trainer
# logger:
# logger to instantiate
# modelcheckpoint:
# modelcheckpoint to instantiate
# callbacks:
# callback1:
# target: importpath
# params:
# key: value
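    # (a small OmegaConf sketch of this config layout appears at the end of this file)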
now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
# add cwd for convenience and to make classes in this file available when
# running as `python main.py`
# (in particular `main.DataModuleFromConfig`)
sys.path.append(os.getcwd())
parser = get_parser()
parser = Trainer.add_argparse_args(parser)
opt, unknown = parser.parse_known_args()
if opt.name and opt.resume:
raise ValueError(
"-n/--name and -r/--resume cannot be specified both."
"If you want to resume training in a new log folder, "
"use -n/--name in combination with --resume_from_checkpoint"
)
if opt.resume:
if not os.path.exists(opt.resume):
raise ValueError("Cannot find {}".format(opt.resume))
if os.path.isfile(opt.resume):
paths = opt.resume.split("/")
idx = len(paths)-paths[::-1].index("logs")+1
logdir = "/".join(paths[:idx])
ckpt = opt.resume
else:
assert os.path.isdir(opt.resume), opt.resume
logdir = opt.resume.rstrip("/")
ckpt = os.path.join(logdir, "checkpoints", "last.ckpt")
opt.resume_from_checkpoint = ckpt
base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*.yaml")))
opt.base = base_configs+opt.base
_tmp = logdir.split("/")
nowname = _tmp[_tmp.index("logs")+1]
else:
if opt.name:
name = "_"+opt.name
elif opt.base:
cfg_fname = os.path.split(opt.base[0])[-1]
cfg_name = os.path.splitext(cfg_fname)[0]
name = "_"+cfg_name
else:
name = ""
nowname = now+name+opt.postfix
logdir = os.path.join("logs", nowname)
ckptdir = os.path.join(logdir, "checkpoints")
cfgdir = os.path.join(logdir, "configs")
seed_everything(opt.seed)
try:
# init and save configs
configs = [OmegaConf.load(cfg) for cfg in opt.base]
cli = OmegaConf.from_dotlist(unknown)
config = OmegaConf.merge(*configs, cli)
lightning_config = config.pop("lightning", OmegaConf.create())
# merge trainer cli with config
trainer_config = lightning_config.get("trainer", OmegaConf.create())
# default to ddp
trainer_config["distributed_backend"] = "ddp"
for k in nondefault_trainer_args(opt):
trainer_config[k] = getattr(opt, k)
if not "gpus" in trainer_config:
del trainer_config["distributed_backend"]
cpu = True
else:
gpuinfo = trainer_config["gpus"]
print(f"Running on GPUs {gpuinfo}")
cpu = False
trainer_opt = argparse.Namespace(**trainer_config)
lightning_config.trainer = trainer_config
# model
model = instantiate_from_config(config.model)
# trainer and callbacks
trainer_kwargs = dict()
# default logger configs
# NOTE wandb < 0.10.0 interferes with shutdown
# wandb >= 0.10.0 seems to fix it but still interferes with pudb
# debugging (wrongly sized pudb ui)
# thus prefer testtube for now
default_logger_cfgs = {
"wandb": {
"target": "pytorch_lightning.loggers.WandbLogger",
"params": {
"name": nowname,
"save_dir": logdir,
"offline": opt.debug,
"id": nowname,
}
},
"testtube": {
"target": "pytorch_lightning.loggers.TestTubeLogger",
"params": {
"name": "testtube",
"save_dir": logdir,
}
},
}
default_logger_cfg = default_logger_cfgs["testtube"]
logger_cfg = lightning_config.logger or OmegaConf.create()
logger_cfg = OmegaConf.merge(default_logger_cfg, logger_cfg)
trainer_kwargs["logger"] = instantiate_from_config(logger_cfg)
# modelcheckpoint - use TrainResult/EvalResult(checkpoint_on=metric) to
# specify which metric is used to determine best models
default_modelckpt_cfg = {
"target": "pytorch_lightning.callbacks.ModelCheckpoint",
"params": {
"dirpath": ckptdir,
"filename": "{epoch:06}",
"verbose": True,
"save_last": True,
}
}
if hasattr(model, "monitor"):
print(f"Monitoring {model.monitor} as checkpoint metric.")
default_modelckpt_cfg["params"]["monitor"] = model.monitor
default_modelckpt_cfg["params"]["save_top_k"] = 3
modelckpt_cfg = lightning_config.modelcheckpoint or OmegaConf.create()
modelckpt_cfg = OmegaConf.merge(default_modelckpt_cfg, modelckpt_cfg)
trainer_kwargs["checkpoint_callback"] = instantiate_from_config(modelckpt_cfg)
# add callback which sets up log directory
default_callbacks_cfg = {
"setup_callback": {
"target": "main.SetupCallback",
"params": {
"resume": opt.resume,
"now": now,
"logdir": logdir,
"ckptdir": ckptdir,
"cfgdir": cfgdir,
"config": config,
"lightning_config": lightning_config,
}
},
"image_logger": {
"target": "main.ImageLogger",
"params": {
"batch_frequency": 750,
"max_images": 4,
"clamp": True
}
},
"learning_rate_logger": {
"target": "main.LearningRateMonitor",
"params": {
"logging_interval": "step",
#"log_momentum": True
}
},
}
callbacks_cfg = lightning_config.callbacks or OmegaConf.create()
callbacks_cfg = OmegaConf.merge(default_callbacks_cfg, callbacks_cfg)
trainer_kwargs["callbacks"] = [instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg]
trainer = Trainer.from_argparse_args(trainer_opt, **trainer_kwargs)
# data
data = instantiate_from_config(config.data)
# NOTE according to https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html
# calling these ourselves should not be necessary but it is.
# lightning still takes care of proper multiprocessing though
data.prepare_data()
data.setup()
# configure learning rate
bs, base_lr = config.data.params.batch_size, config.model.base_learning_rate
if not cpu:
ngpu = len(lightning_config.trainer.gpus.strip(",").split(','))
else:
ngpu = 1
accumulate_grad_batches = lightning_config.trainer.accumulate_grad_batches or 1
print(f"accumulate_grad_batches = {accumulate_grad_batches}")
lightning_config.trainer.accumulate_grad_batches = accumulate_grad_batches
model.learning_rate = accumulate_grad_batches * ngpu * bs * base_lr
print("Setting learning rate to {:.2e} = {} (accumulate_grad_batches) * {} (num_gpus) * {} (batchsize) * {:.2e} (base_lr)".format(
model.learning_rate, accumulate_grad_batches, ngpu, bs, base_lr))
# allow checkpointing via USR1
def melk(*args, **kwargs):
# run all checkpoint hooks
if trainer.global_rank == 0:
print("Summoning checkpoint.")
ckpt_path = os.path.join(ckptdir, "last.ckpt")
trainer.save_checkpoint(ckpt_path)
def divein(*args, **kwargs):
if trainer.global_rank == 0:
import pudb; pudb.set_trace()
import signal
signal.signal(signal.SIGUSR1, melk)
signal.signal(signal.SIGUSR2, divein)
# run
if opt.train:
try:
trainer.fit(model, data)
except Exception:
melk()
raise
if not opt.no_test and not trainer.interrupted:
trainer.test(model, data)
except Exception:
if opt.debug and trainer.global_rank==0:
try:
import pudb as debugger
except ImportError:
import pdb as debugger
debugger.post_mortem()
raise
finally:
# move newly created debug project to debug_runs
if opt.debug and not opt.resume and trainer.global_rank==0:
dst, name = os.path.split(logdir)
dst = os.path.join(dst, "debug_runs", name)
os.makedirs(os.path.split(dst)[0], exist_ok=True)
os.rename(logdir, dst)
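# --- Illustrative sketch of the config layout described above ---
# A minimal example, with placeholder targets and values, of the kind of config
# this script expects and of how OmegaConf merges base configs with
# `nested.key=value` command-line overrides (mirroring the merge logic in the
# __main__ block above).
def _example_config_merge():
    base = OmegaConf.create(
        {
            "model": {
                "base_learning_rate": 4.5e-6,
                "target": "taming.models.vqgan.VQModel",  # placeholder target
                "params": {"embed_dim": 256},
            },
            "data": {
                "target": "main.DataModuleFromConfig",
                "params": {"batch_size": 4, "wrap": True},
            },
        }
    )
    cli = OmegaConf.from_dotlist(["data.params.batch_size=8"])
    merged = OmegaConf.merge(base, cli)
    assert merged.data.params.batch_size == 8
    return merged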
| 36.246101
| 138
| 0.57703
|
4a191dee830da0ceef82b3ea8706a02a95a1e7e8
| 43,733
|
py
|
Python
|
tests/unit/api/metadata/test_v0.py
|
tianruzhou-db/amundsenfrontendlibrary
|
d1afcf6af17f5c459a5913364138e37f240acfac
|
[
"Apache-2.0"
] | 3
|
2021-02-09T13:52:03.000Z
|
2022-02-26T02:36:02.000Z
|
tests/unit/api/metadata/test_v0.py
|
tianruzhou-db/amundsenfrontendlibrary
|
d1afcf6af17f5c459a5913364138e37f240acfac
|
[
"Apache-2.0"
] | 51
|
2020-12-11T23:23:55.000Z
|
2022-03-18T23:38:04.000Z
|
tests/unit/api/metadata/test_v0.py
|
tianruzhou-db/amundsenfrontendlibrary
|
d1afcf6af17f5c459a5913364138e37f240acfac
|
[
"Apache-2.0"
] | 2
|
2021-02-23T18:23:35.000Z
|
2022-03-18T15:12:25.000Z
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import json
import responses
import unittest
from unittest.mock import patch
from http import HTTPStatus
from amundsen_application import create_app
from amundsen_application.api.metadata.v0 import TABLE_ENDPOINT, LAST_INDEXED_ENDPOINT,\
POPULAR_TABLES_ENDPOINT, TAGS_ENDPOINT, USER_ENDPOINT, DASHBOARD_ENDPOINT
from amundsen_application.config import MatchRuleObject
from amundsen_application.tests.test_utils import TEST_USER_ID
local_app = create_app('amundsen_application.config.TestConfig', 'tests/templates')
class MetadataTest(unittest.TestCase):
def setUp(self) -> None:
self.mock_popular_tables = {
'popular_tables': [
{
'cluster': 'test_cluster',
'database': 'test_db',
'schema': 'test_schema',
'description': 'This is a test',
'name': 'test_table',
'key': 'test_db://test_cluster.test_schema/test_table',
}
]
}
self.expected_parsed_popular_tables = [
{
'cluster': 'test_cluster',
'database': 'test_db',
'description': 'This is a test',
'schema': 'test_schema',
'type': 'table',
'name': 'test_table',
'key': 'test_db://test_cluster.test_schema/test_table',
'last_updated_timestamp': None,
}
]
self.mock_metadata = {
'cluster': 'test_cluster',
'columns': [
{
'name': 'column_1',
'description': 'This is a test',
'col_type': 'bigint',
'sort_order': 0,
'stats': [
{'stat_type': 'count', 'stat_val': '100', 'start_epoch': 1538352000, 'end_epoch': 1538352000},
{'stat_type': 'count_null', 'stat_val': '0', 'start_epoch': 1538352000, 'end_epoch': 1538352000}
]
}
],
'database': 'test_db',
'is_view': False,
'key': 'test_db://test_cluster.test_schema/test_table',
'last_updated_timestamp': 1563872712,
'owners': [],
'schema': 'test_schema',
'name': 'test_table',
'description': 'This is a test',
'resource_reports': [],
'programmatic_descriptions': [
{'source': 'c_1', 'text': 'description c'},
{'source': 'a_1', 'text': 'description a'},
{'source': 'b_1', 'text': 'description b'}
],
'tags': [],
'table_readers': [
{'user': {'email': 'test@test.com', 'first_name': None, 'last_name': None}, 'read_count': 100}
],
'watermarks': [
{'watermark_type': 'low_watermark', 'partition_key': 'ds', 'partition_value': '', 'create_time': ''},
{'watermark_type': 'high_watermark', 'partition_key': 'ds', 'partition_value': '', 'create_time': ''}
],
'table_writer': {
'application_url': 'https://test-test.test.test',
'name': 'test_name',
'id': 'test_id',
'description': 'This is a test'
}
}
self.expected_parsed_metadata = {
'badges': [],
'cluster': 'test_cluster',
'database': 'test_db',
'schema': 'test_schema',
'name': 'test_table',
'key': 'test_db://test_cluster.test_schema/test_table',
'description': 'This is a test',
'tags': [],
'table_readers': [
{
'user': {
'email': 'test@test.com',
'first_name': None,
'last_name': None,
'display_name': 'test@test.com',
'profile_url': ''
},
'read_count': 100
}
],
'partition': {
'is_partitioned': True,
'key': 'ds',
'value': '01-30-2019'
},
'owners': [],
'is_view': False,
'columns': [
{
'name': 'column_1',
'description': 'This is a test',
'col_type': 'bigint',
'sort_order': 0,
'stats': [
{'stat_type': 'count', 'stat_val': '100', 'start_epoch': 1538352000, 'end_epoch': 1538352000},
{'stat_type': 'count_null', 'stat_val': '0', 'start_epoch': 1538352000, 'end_epoch': 1538352000}
],
'is_editable': True
}
],
"programmatic_descriptions": [
{
'source': 'a',
'text': 'description a'
},
{
'source': 'b',
'text': 'description b'
},
{
'source': 'c',
'text': 'description c'
},
],
'resource_reports': [],
'table_writer': {
'application_url': 'https://test-test.test.test',
'name': 'test_name',
'id': 'test_id',
'description': 'This is a test'
},
'watermarks': [
{'watermark_type': 'low_watermark', 'partition_key': 'ds', 'partition_value': '', 'create_time': ''},
{'watermark_type': 'high_watermark', 'partition_key': 'ds', 'partition_value': '', 'create_time': ''}
],
'source': '/source',
'is_editable': True,
'last_updated_timestamp': None
}
self.expected_related_dashboard_response = {
"dashboards": [{
"group_name": "Group Test One",
"description": None,
"cluster": "gold",
"group_url": "https://app.mode.com/companyName/spaces/123",
"uri": "mode_dashboard://gold.123/234rt78",
"last_successful_run_timestamp": 1590505846,
"name": "Test Dashboard One",
"product": "mode",
"url": "https://app.mode.com/companyName/reports/234rt78"
}, {
"group_name": "Group Test Two",
"description": None,
"cluster": "gold",
"group_url": "https://app.mode.com/companyName/spaces/334",
"uri": "mode_dashboard://gold.334/34thyu56",
"last_successful_run_timestamp": 1590519704,
"name": "Test Dashboard Two",
"product": "mode",
"url": "https://app.mode.com/companyName/reports/34thyu56"
}, {
"group_name": "Group Test Three",
"description": None,
"cluster": "gold",
"group_url": "https://app.mode.com/companyName/spaces/789",
"uri": "mode_dashboard://gold.789/12ed34",
"last_successful_run_timestamp": 1590538191,
"name": "Test Dashboard Three",
"product": "mode",
"url": "https://app.mode.com/companyName/reports/12ed34"
}]
}
self.expected_programmatic_descriptions_with_config = {
"programmatic_descriptions": [
{
'source': 'a',
'text': 'description a'
},
{
'source': 'b',
'text': 'description b'
},
{
'source': 'c',
'text': 'description c'
},
]
}
self.mock_tags = {
'tag_usages': [
{
'tag_count': 3,
'tag_name': 'tag_0'
}, {
'tag_count': 4,
'tag_name': 'tag_1'
}, {
'tag_count': 5,
'tag_name': 'tag_2'
}, {
'tag_count': 10,
'tag_name': 'tag_3'
}, {
'tag_count': 1,
'tag_name': 'tag_4'
}
]
}
self.expected_parsed_tags = [
{
'tag_count': 3,
'tag_name': 'tag_0'
},
{
'tag_count': 4,
'tag_name': 'tag_1'
},
{
'tag_count': 5,
'tag_name': 'tag_2'
},
{
'tag_count': 10,
'tag_name': 'tag_3'
},
{
'tag_count': 1,
'tag_name': 'tag_4'
}
]
self.mock_user = {
'email': 'test@test.com',
'employee_type': 'FTE',
'first_name': 'Firstname',
'full_name': 'Firstname Lastname',
'github_username': 'githubusername',
'is_active': True,
'last_name': 'Lastname',
'manager_id': 'managerid',
'manager_fullname': 'Manager Fullname',
'role_name': 'SWE',
'slack_id': 'slackuserid',
'team_name': 'Amundsen',
'user_id': 'testuserid',
}
self.expected_parsed_user = {
'display_name': 'Firstname Lastname',
'email': 'test@test.com',
'employee_type': 'FTE',
'first_name': 'Firstname',
'full_name': 'Firstname Lastname',
'github_username': 'githubusername',
'is_active': True,
'last_name': 'Lastname',
'manager_email': None,
'manager_fullname': 'Manager Fullname',
'manager_id': 'managerid',
'profile_url': '',
'role_name': 'SWE',
'slack_id': 'slackuserid',
'team_name': 'Amundsen',
'user_id': 'testuserid',
}
self.get_user_resource_response = {
'table': [
{
'cluster': 'cluster',
'database': 'database',
'schema': 'schema',
'name': 'table_name_0',
'description': 'description',
},
{
'cluster': 'cluster',
'database': 'database',
'schema': 'schema',
'name': 'table_name_1',
'description': 'description',
},
],
'dashboard': [],
}
self.expected_parsed_user_resources = {
'table': [
{
'cluster': 'cluster',
'database': 'database',
'description': 'description',
'last_updated_timestamp': None,
'name': 'table_name_0',
'schema': 'schema',
'type': 'table',
'key': 'database://cluster.schema/table_name_0',
},
{
'cluster': 'cluster',
'database': 'database',
'description': 'description',
'last_updated_timestamp': None,
'name': 'table_name_1',
'schema': 'schema',
'type': 'table',
'key': 'database://cluster.schema/table_name_1',
},
],
'dashboard': [],
}
self.mock_dashboard_metadata = {
"badges": [],
"chart_names": [],
"cluster": "gold",
"created_timestamp": 1558035206,
"description": "test description",
"frequent_users": [],
"group_name": "test group name",
"group_url": "test url",
"last_run_state": "failed",
"last_run_timestamp": 1587395014,
"last_successful_run_timestamp": 1578434241,
"name": "test report name",
"owners": [
{
"display_name": "First Last",
"email": "email@mail.com",
"employee_type": "teamMember",
"first_name": "First",
"full_name": "First Last",
"github_username": "",
"is_active": True,
"last_name": "Last",
"manager_email": "",
"manager_fullname": "",
"profile_url": "",
"role_name": "Test Role",
"slack_id": "",
"team_name": "Team Name",
"user_id": "test_user_id"
}
],
"product": "mode",
"query_names": [
"test query 1",
"test query 2",
"test query 3",
],
"recent_view_count": 8,
"tables": [
{
"cluster": "cluster",
"database": "database",
"description": "test description",
"key": "database://cluster.schema/name",
"last_updated_timestamp": None,
"name": "name",
"schema": "schema",
"type": "table"
},
{
"cluster": "cluster",
"database": "database",
"description": "test description",
"key": "database://cluster.schema/name_2",
"last_updated_timestamp": None,
"name": "name_2",
"schema": "schema",
"type": "table"
},
],
"tags": [
{
"tag_name": "amundsen",
"tag_type": "default"
},
],
"updated_timestamp": 1578433917,
"uri": "test_dashboard_uri",
"url": "test_dashboard_url"
}
self.expected_parsed_dashboard = {
"badges": [],
"chart_names": [],
"cluster": "gold",
"created_timestamp": 1558035206,
"description": "test description",
"frequent_users": [],
"group_name": "test group name",
"group_url": "test url",
"last_run_state": "failed",
"last_run_timestamp": 1587395014,
"last_successful_run_timestamp": 1578434241,
"name": "test report name",
"owners": [
{
"display_name": "First Last",
"email": "email@mail.com",
"employee_type": "teamMember",
"first_name": "First",
"full_name": "First Last",
"github_username": "",
"is_active": True,
"last_name": "Last",
"manager_email": "",
"manager_fullname": "",
"profile_url": "",
"role_name": "Test Role",
"slack_id": "",
"team_name": "Team Name",
"user_id": "test_user_id"
}
],
"product": "mode",
"query_names": [
"test query 1",
"test query 2",
"test query 3",
],
"recent_view_count": 8,
"tables": [
{
"cluster": "cluster",
"database": "database",
"description": "test description",
"key": "database://cluster.schema/name",
"last_updated_timestamp": None,
"name": "name",
"schema": "schema",
"type": "table"
},
{
"cluster": "cluster",
"database": "database",
"description": "test description",
"key": "database://cluster.schema/name_2",
"last_updated_timestamp": None,
"name": "name_2",
"schema": "schema",
"type": "table"
},
],
"tags": [
{
"tag_name": "amundsen",
"tag_type": "default"
},
],
"updated_timestamp": 1578433917,
"uri": "test_dashboard_uri",
"url": "test_dashboard_url"
}
@responses.activate
def test_popular_tables_success(self) -> None:
"""
Test successful popular_tables request
:return:
"""
mock_url = local_app.config['METADATASERVICE_BASE'] \
+ POPULAR_TABLES_ENDPOINT \
+ f'/{TEST_USER_ID}'
responses.add(responses.GET, mock_url,
json=self.mock_popular_tables, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.get('/api/metadata/v0/popular_tables')
data = json.loads(response.data)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertCountEqual(data.get('results'), self.expected_parsed_popular_tables)
@responses.activate
def test_popular_tables_propagate_failure(self) -> None:
"""
Test that any error codes from the request are propagated through, to be
returned to the React application
:return:
"""
mock_url = local_app.config['METADATASERVICE_BASE'] \
+ POPULAR_TABLES_ENDPOINT \
+ f'/{TEST_USER_ID}'
responses.add(responses.GET, mock_url,
json=self.mock_popular_tables, status=HTTPStatus.BAD_REQUEST)
with local_app.test_client() as test:
response = test.get('/api/metadata/v0/popular_tables')
self.assertEqual(response.status_code, HTTPStatus.BAD_REQUEST)
@responses.activate
def test_popular_tables_catch_exception(self) -> None:
"""
Test catching exception if there is an issue processing the popular table
results from the metadata service
:return:
"""
mock_url = local_app.config['METADATASERVICE_BASE'] \
+ POPULAR_TABLES_ENDPOINT \
+ f'/{TEST_USER_ID}'
responses.add(responses.GET, mock_url,
json={'popular_tables': None}, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.get('/api/metadata/v0/popular_tables')
self.assertEqual(response.status_code, HTTPStatus.INTERNAL_SERVER_ERROR)
@responses.activate
def test_get_table_metadata_success(self) -> None:
"""
Test successful get_table_metadata request
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + TABLE_ENDPOINT + '/db://cluster.schema/table'
responses.add(responses.GET, url, json=self.mock_metadata, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.get(
'/api/metadata/v0/table',
query_string=dict(
key='db://cluster.schema/table',
index='0',
source='test_source'
)
)
data = json.loads(response.data)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertCountEqual(data.get('tableData'), self.expected_parsed_metadata)
@responses.activate
def test_update_table_owner_success(self) -> None:
"""
Test successful update_table_owner request
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + TABLE_ENDPOINT + '/db://cluster.schema/table/owner/test'
responses.add(responses.PUT, url, json={}, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.put(
'/api/metadata/v0/update_table_owner',
json={
'key': 'db://cluster.schema/table',
'owner': 'test'
}
)
self.assertEqual(response.status_code, HTTPStatus.OK)
@responses.activate
def test_update_table_owner_propagate_failure(self) -> None:
"""
Test that any error codes from the update_table_owner request are propagated
to be returned to the React application
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + TABLE_ENDPOINT + '/db://cluster.schema/table/owner/test'
responses.add(responses.PUT, url, json={}, status=HTTPStatus.BAD_REQUEST)
with local_app.test_client() as test:
response = test.put(
'/api/metadata/v0/update_table_owner',
json={
'key': 'db://cluster.schema/table',
'owner': 'test'
}
)
self.assertEqual(response.status_code, HTTPStatus.BAD_REQUEST)
@responses.activate
def test_get_last_indexed_success(self) -> None:
"""
Test successful get_last_indexed request
:return:
"""
responses.add(responses.GET, local_app.config['METADATASERVICE_BASE'] + LAST_INDEXED_ENDPOINT,
json={'neo4j_latest_timestamp': 1538352000}, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.get('/api/metadata/v0/get_last_indexed')
data = json.loads(response.data)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertEqual(data.get('timestamp'), 1538352000)
@responses.activate
def test_get_last_indexed_propagate_failure(self) -> None:
"""
Test that any error codes from the get_last_indexed request are propagated through,
to be returned to the React application
:return:
"""
responses.add(responses.GET, local_app.config['METADATASERVICE_BASE'] + LAST_INDEXED_ENDPOINT,
json=None, status=HTTPStatus.BAD_REQUEST)
with local_app.test_client() as test:
response = test.get('/api/metadata/v0/get_last_indexed')
self.assertEqual(response.status_code, HTTPStatus.BAD_REQUEST)
@responses.activate
def test_get_table_description_success(self) -> None:
"""
Test successful get_table_description request
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + TABLE_ENDPOINT + '/db://cluster.schema/table/description'
responses.add(responses.GET, url, json={'description': 'This is a test'}, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.get(
'/api/metadata/v0/get_table_description',
query_string=dict(key='db://cluster.schema/table')
)
data = json.loads(response.data)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertEqual(data.get('description'), 'This is a test')
@responses.activate
def test_get_table_description_propagate_failure(self) -> None:
"""
Test that any error codes from the get_table_description request are propagated through,
to be returned to the React application
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + TABLE_ENDPOINT + '/db://cluster.schema/table/description'
responses.add(responses.GET, url, json={'description': 'This is a test'}, status=HTTPStatus.BAD_REQUEST)
with local_app.test_client() as test:
response = test.get(
'/api/metadata/v0/get_table_description',
query_string=dict(key='db://cluster.schema/table')
)
self.assertEqual(response.status_code, HTTPStatus.BAD_REQUEST)
@responses.activate
def test_put_table_description_success(self) -> None:
"""
Test successful put_table_description request
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + TABLE_ENDPOINT + '/db://cluster.schema/table/description'
responses.add(responses.PUT, url, json={}, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.put(
'/api/metadata/v0/put_table_description',
json={
'key': 'db://cluster.schema/table',
'description': 'test',
'source': 'source'
}
)
self.assertEqual(response.status_code, HTTPStatus.OK)
@responses.activate
def test_put_table_description_denied(self) -> None:
"""
Test put_table_description that should be rejected due to permissions.
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + TABLE_ENDPOINT + \
'/db://cluster.schema/table/column/colA/description'
responses.add(responses.GET, url, json={'description': 'This is a test'}, status=HTTPStatus.OK)
with patch.dict(local_app.config, {'UNEDITABLE_SCHEMAS': set(['schema'])}):
with local_app.test_client() as test:
response = test.put(
'/api/metadata/v0/put_table_description',
json={
'key': 'db://cluster.schema/table',
'description': 'test',
'source': 'source'
}
)
self.assertEqual(response.status_code, HTTPStatus.FORBIDDEN)
@responses.activate
def test_get_column_description_success(self) -> None:
"""
Test successful get_column_description request
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + TABLE_ENDPOINT + \
'/db://cluster.schema/table/column/colA/description'
responses.add(responses.GET, url, json={'description': 'This is a test'}, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.get(
'/api/metadata/v0/get_column_description',
query_string=dict(
key='db://cluster.schema/table',
index='0',
column_name='colA'
)
)
data = json.loads(response.data)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertEqual(data.get('description'), 'This is a test')
@responses.activate
def test_get_column_description_propagate_failure(self) -> None:
"""
Test that any error codes from the get_column_description request are propagated through,
to be returned to the React application
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + TABLE_ENDPOINT + \
'/db://cluster.schema/table/column/colA/description'
responses.add(responses.GET, url, json={'description': 'This is a test'}, status=HTTPStatus.BAD_REQUEST)
with local_app.test_client() as test:
response = test.get(
'/api/metadata/v0/get_column_description',
query_string=dict(
key='db://cluster.schema/table',
index='0',
column_name='colA'
)
)
self.assertEqual(response.status_code, HTTPStatus.BAD_REQUEST)
@responses.activate
def test_put_column_description_success(self) -> None:
"""
Test successful put_column_description request
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + TABLE_ENDPOINT + \
'/db://cluster.schema/table/column/col/description'
responses.add(responses.PUT, url, json={}, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.put(
'/api/metadata/v0/put_column_description',
json={
'key': 'db://cluster.schema/table',
'column_name': 'col',
'description': 'test',
'source': 'source'
}
)
self.assertEqual(response.status_code, HTTPStatus.OK)
@responses.activate
def test_put_column_description_denied(self) -> None:
"""
Test put_column_description on an unwritable table.
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + TABLE_ENDPOINT + \
'/db://cluster.schema/an_uneditable_table/column/col/description'
responses.add(responses.PUT, url, json={}, status=HTTPStatus.OK)
rule = MatchRuleObject(table_name_regex=r".*uneditable_table.*")
with patch.dict(local_app.config, {'UNEDITABLE_TABLE_DESCRIPTION_MATCH_RULES': [rule]}):
with local_app.test_client() as test:
response = test.put(
'/api/metadata/v0/put_column_description',
json={
'key': 'db://cluster.schema/an_uneditable_table',
'column_name': 'col',
'description': 'test',
'source': 'source'
}
)
self.assertEqual(response.status_code, HTTPStatus.FORBIDDEN)
@responses.activate
def test_get_tags(self) -> None:
"""
Test successful fetch of all tags
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + TAGS_ENDPOINT
responses.add(responses.GET, url, json=self.mock_tags, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.get('/api/metadata/v0/tags')
data = json.loads(response.data)
self.assertCountEqual(data.get('tags'), self.expected_parsed_tags)
@responses.activate
def test_update_table_tags_put(self) -> None:
"""
Test adding a tag on a table
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + TABLE_ENDPOINT + '/db://cluster.schema/table/tag/tag_5'
responses.add(responses.PUT, url, json={}, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.put(
'/api/metadata/v0/update_table_tags',
json={
'key': 'db://cluster.schema/table',
'tag': 'tag_5'
}
)
self.assertEqual(response.status_code, HTTPStatus.OK)
@responses.activate
def test_update_table_tags_delete(self) -> None:
"""
Test deleting a tag on a table
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + TABLE_ENDPOINT + '/db://cluster.schema/table/tag/tag_5'
responses.add(responses.DELETE, url, json={}, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.delete(
'/api/metadata/v0/update_table_tags',
json={
'key': 'db://cluster.schema/table',
'tag': 'tag_5'
}
)
self.assertEqual(response.status_code, HTTPStatus.OK)
@responses.activate
def test_update_dashboard_tags_put(self) -> None:
"""
Test adding a tag on a dashboard
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + DASHBOARD_ENDPOINT + '/test_dashboard_uri/tag/test_tag'
responses.add(responses.PUT, url, json={}, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.put(
'/api/metadata/v0/update_dashboard_tags',
json={
'key': 'test_dashboard_uri',
'tag': 'test_tag'
}
)
self.assertEqual(response.status_code, HTTPStatus.OK)
@responses.activate
def test_update_dashboard_tags_delete(self) -> None:
"""
Test deleting a tag on a dashboard
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + DASHBOARD_ENDPOINT + '/test_dashboard_uri/tag/test_tag'
responses.add(responses.DELETE, url, json={}, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.delete(
'/api/metadata/v0/update_dashboard_tags',
json={
'key': 'test_dashboard_uri',
'tag': 'test_tag'
}
)
self.assertEqual(response.status_code, HTTPStatus.OK)
@responses.activate
def test_get_user_failure(self) -> None:
"""
Test get_user fails when no user_id is specified
"""
url = local_app.config['METADATASERVICE_BASE'] + USER_ENDPOINT + '/testuser'
responses.add(responses.GET, url, json=self.mock_user, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.get('/api/metadata/v0/user')
self.assertEqual(response.status_code, HTTPStatus.INTERNAL_SERVER_ERROR)
@responses.activate
def test_get_user_success(self) -> None:
"""
Test get_user success
"""
url = local_app.config['METADATASERVICE_BASE'] + USER_ENDPOINT + '/testuser'
responses.add(responses.GET, url, json=self.mock_user, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.get('/api/metadata/v0/user', query_string=dict(user_id='testuser'))
data = json.loads(response.data)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertEqual(dict(data.get('user'), **self.expected_parsed_user), data.get('user'))
@responses.activate
def test_get_bookmark(self) -> None:
"""
Test get_bookmark with no user specified
"""
url = '{0}{1}/{2}/follow/'.format(local_app.config['METADATASERVICE_BASE'], USER_ENDPOINT, TEST_USER_ID)
responses.add(responses.GET, url, json=self.get_user_resource_response, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.get('/api/metadata/v0/user/bookmark')
data = json.loads(response.data)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertCountEqual(data.get('bookmarks'), self.expected_parsed_user_resources)
@responses.activate
def test_get_bookmark_failure(self) -> None:
"""
Test correct response returned when get_bookmark fails
"""
url = f"{local_app.config['METADATASERVICE_BASE']}{USER_ENDPOINT}/{TEST_USER_ID}/follow/"
responses.add(responses.GET, url, json=self.mock_user, status=HTTPStatus.BAD_REQUEST)
with local_app.test_client() as test:
response = test.get('/api/metadata/v0/user/bookmark')
self.assertEqual(response.status_code, HTTPStatus.BAD_REQUEST)
expected = {
'bookmarks': {'table': [], 'dashboard': []},
'msg': 'Encountered error: failed to get bookmark for user_id: test_user_id',
}
self.assertEqual(response.json, expected)
@responses.activate
def test_get_bookmark_for_user(self) -> None:
"""
Test get_bookmark with a specified user
"""
specified_user = 'other_user'
url = '{0}{1}/{2}/follow/'.format(local_app.config['METADATASERVICE_BASE'], USER_ENDPOINT, specified_user)
responses.add(responses.GET, url, json=self.get_user_resource_response, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.get('/api/metadata/v0/user/bookmark', query_string=dict(user_id=specified_user))
data = json.loads(response.data)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertCountEqual(data.get('bookmarks'), self.expected_parsed_user_resources)
@responses.activate
def test_put_bookmark(self) -> None:
"""
Test update_bookmark with a PUT request
"""
resource_type = 'table'
key = 'database://cluster.schema/table_name_1'
url = '{0}{1}/{2}/follow/{3}/{4}'.format(local_app.config['METADATASERVICE_BASE'],
USER_ENDPOINT,
TEST_USER_ID,
resource_type,
key)
responses.add(responses.PUT, url, json={}, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.put(
'/api/metadata/v0/user/bookmark',
json={
'type': resource_type,
'key': key,
})
self.assertEqual(response.status_code, HTTPStatus.OK)
@responses.activate
def test_delete_bookmark(self) -> None:
"""
Test update_bookmark with a DELETE request
"""
resource_type = 'table'
key = 'database://cluster.schema/table_name_1'
url = '{0}{1}/{2}/follow/{3}/{4}'.format(local_app.config['METADATASERVICE_BASE'],
USER_ENDPOINT,
TEST_USER_ID,
resource_type,
key)
responses.add(responses.DELETE, url, json={}, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.delete(
'/api/metadata/v0/user/bookmark',
json={
'type': resource_type,
'key': key,
})
self.assertEqual(response.status_code, HTTPStatus.OK)
@responses.activate
def test_get_user_read(self) -> None:
"""
Test get_user_read API request
"""
test_user = 'test_user'
url = '{0}{1}/{2}/read/'.format(local_app.config['METADATASERVICE_BASE'], USER_ENDPOINT, test_user)
responses.add(responses.GET, url, json=self.get_user_resource_response, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.get('/api/metadata/v0/user/read', query_string=dict(user_id=test_user))
data = json.loads(response.data)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertCountEqual(data.get('read'), self.expected_parsed_user_resources.get('table'))
@responses.activate
def test_get_user_own(self) -> None:
"""
Test get_user_own API request
"""
test_user = 'test_user'
url = '{0}{1}/{2}/own/'.format(local_app.config['METADATASERVICE_BASE'], USER_ENDPOINT, test_user)
responses.add(responses.GET, url, json=self.get_user_resource_response, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.get('/api/metadata/v0/user/own', query_string=dict(user_id=test_user))
data = json.loads(response.data)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertCountEqual(data.get('own'), self.expected_parsed_user_resources)
@responses.activate
def test_get_dashboard_success(self) -> None:
"""
Test get_dashboard API success
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + DASHBOARD_ENDPOINT + '/test_dashboard_uri'
responses.add(responses.GET, url, json=self.mock_dashboard_metadata, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.get(
'/api/metadata/v0/dashboard',
query_string=dict(
uri='test_dashboard_uri',
index='0',
source='test_source'
)
)
data = json.loads(response.data)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertCountEqual(data.get('dashboard'), self.expected_parsed_dashboard)
@responses.activate
def test_get_dashboard_bad_parameters(self) -> None:
"""
Test get_dashboard API failure with missing URI parameter
:return:
"""
url = local_app.config['METADATASERVICE_BASE'] + DASHBOARD_ENDPOINT + '/test_dashboard_uri'
responses.add(responses.GET, url, json=self.mock_dashboard_metadata, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.get(
'/api/metadata/v0/dashboard',
query_string=dict()
)
data = json.loads(response.data)
self.assertEqual(response.status_code, HTTPStatus.INTERNAL_SERVER_ERROR)
self.assertCountEqual(data.get('dashboard'), {})
@responses.activate
def test_get_related_dashboards_success(self) -> None:
"""
Test get_related_dashboards API success
:return:
"""
test_table = 'db://cluster.schema/table'
url = local_app.config['METADATASERVICE_BASE'] + TABLE_ENDPOINT + '/' + test_table + '/dashboard/'
responses.add(responses.GET, url, json=self.expected_related_dashboard_response, status=HTTPStatus.OK)
with local_app.test_client() as test:
response = test.get(
'/api/metadata/v0/table/{0}/dashboards'.format(test_table)
)
data = json.loads(response.data)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertEqual(
len(data.get('dashboards')),
len(self.expected_related_dashboard_response.get('dashboards'))
)
@responses.activate
def test_get_related_dashboards_failure(self) -> None:
"""
Test get_related_dashboards API failure
:return:
"""
test_table = 'db://cluster.schema/table'
url = local_app.config['METADATASERVICE_BASE'] + TABLE_ENDPOINT + '/' + test_table + '/dashboard/'
responses.add(responses.GET, url, json=self.expected_related_dashboard_response, status=HTTPStatus.BAD_REQUEST)
with local_app.test_client() as test:
response = test.get(
'/api/metadata/v0/table/{0}/dashboards'.format(test_table)
)
self.assertEqual(response.status_code, HTTPStatus.BAD_REQUEST)
expected = {
'dashboards': [],
'msg': 'Encountered 400 Error: Related dashboard metadata request failed',
'status_code': 400
}
self.assertEqual(response.json, expected)
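    # --- Illustrative sketch, not part of the original test suite ---
    # Every test above follows the same pattern: register a mocked
    # metadata-service response with `responses.add`, call the Flask route
    # through `test_client`, then assert on the parsed JSON.  The method name
    # and dashboard URI below are hypothetical.
    @responses.activate
    def _example_mock_pattern(self) -> None:
        url = local_app.config['METADATASERVICE_BASE'] + DASHBOARD_ENDPOINT + '/example_uri'
        responses.add(responses.GET, url, json=self.mock_dashboard_metadata, status=HTTPStatus.OK)
        with local_app.test_client() as test:
            response = test.get(
                '/api/metadata/v0/dashboard',
                query_string=dict(uri='example_uri', index='0', source='example_source')
            )
            self.assertEqual(response.status_code, HTTPStatus.OK)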
| 39.541591 | 120 | 0.51851 |
4a191f41cf38151b7b62732f50d39730def10538 | 1,641 | py | Python |
demos/unpaired_ct_lung/demo_predict.py | Zhiyuan-w/DeepReg | 3e372d1835fdc9468c026db3767dcf9e8d4a4b0e | ["Apache-2.0"] | 379 | 2020-07-18T22:00:53.000Z | 2022-03-31T05:17:29.000Z |
demos/unpaired_ct_lung/demo_predict.py | Zhiyuan-w/DeepReg | 3e372d1835fdc9468c026db3767dcf9e8d4a4b0e | ["Apache-2.0"] | 646 | 2020-07-18T08:55:48.000Z | 2022-03-29T02:24:54.000Z |
demos/unpaired_ct_lung/demo_predict.py | Zhiyuan-w/DeepReg | 3e372d1835fdc9468c026db3767dcf9e8d4a4b0e | ["Apache-2.0"] | 62 | 2020-07-26T05:00:23.000Z | 2022-02-22T21:58:19.000Z |
# pylint: disable=line-too-long
import argparse
from datetime import datetime
from deepreg.predict import predict
name = "unpaired_ct_lung"
# The parser is used to simplify testing; by default, test mode is not used.
# Please run the script with the --full flag to ensure non-testing mode,
# for instance:
# python script.py --full
parser = argparse.ArgumentParser()
parser.add_argument(
"--test",
help="Execute the script with reduced image size for test purpose.",
dest="test",
action="store_true",
)
parser.add_argument(
"--full",
help="Execute the script with full configuration.",
dest="test",
action="store_false",
)
parser.set_defaults(test=False)
args = parser.parse_args()
print(
"\n\n\n\n\n"
"=========================================================\n"
"The prediction can also be launched using the following command.\n"
"deepreg_predict --gpu '' "
f"--config_path demos/{name}/{name}.yaml "
f"--ckpt_path demos/{name}/dataset/pretrained/ckpt-5000 "
f"--log_dir demos/{name} "
"--exp_name logs_predict "
"--save_png --split test\n"
"=========================================================\n"
"\n\n\n\n\n"
)
log_dir = f"demos/{name}"
exp_name = "logs_predict/" + datetime.now().strftime("%Y%m%d-%H%M%S")
ckpt_path = f"{log_dir}/dataset/pretrained/ckpt-5000"
config_path = [f"{log_dir}/{name}.yaml"]
if args.test:
config_path.append("config/test/demo_unpaired_grouped.yaml")
predict(
gpu="0",
gpu_allow_growth=True,
ckpt_path=ckpt_path,
split="test",
batch_size=1,
log_dir=log_dir,
exp_name=exp_name,
config_path=config_path,
)
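# --- Illustrative sketch, not part of the original demo ---
# The same predict() call can be pointed at a different checkpoint (for
# example, one produced by a training run); the helper below is hypothetical
# and is never invoked.
def predict_with_checkpoint(custom_ckpt_path):
    predict(
        gpu="0",
        gpu_allow_growth=True,
        ckpt_path=custom_ckpt_path,
        split="test",
        batch_size=1,
        log_dir=log_dir,
        exp_name=exp_name,
        config_path=config_path,
    )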
| 26.901639 | 72 | 0.63376 |
4a191fdc9709f1f24a5cae7828375ed3683ce21b | 2,903 | py | Python |
src/cfnlint/rules/resources/route53/RecordSetName.py | tomislacker/cfn-python-lint | f209ddfef9bcc1a005adfebcfcc16220b18deddb | ["MIT-0"] | 1,134 | 2019-03-02T14:58:34.000Z | 2021-05-15T00:57:16.000Z |
src/cfnlint/rules/resources/route53/RecordSetName.py | tomislacker/cfn-python-lint | f209ddfef9bcc1a005adfebcfcc16220b18deddb | ["MIT-0"] | 1,122 | 2019-03-03T04:27:15.000Z | 2021-05-14T20:51:16.000Z |
src/cfnlint/rules/resources/route53/RecordSetName.py | tomislacker/cfn-python-lint | f209ddfef9bcc1a005adfebcfcc16220b18deddb | ["MIT-0"] | 297 | 2019-03-11T09:56:57.000Z | 2021-05-14T16:41:19.000Z |
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import six
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
class RecordSetName(CloudFormationLintRule):
"""Check if a Route53 Resoruce Records Name is valid with a HostedZoneName"""
id = 'E3041'
shortdesc = 'RecordSet HostedZoneName is a superdomain of Name'
description = 'In a RecordSet, the HostedZoneName must be a superdomain of the Name being validated'
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-route53-recordset.html#cfn-route53-recordset-name'
tags = ['resource', 'properties', 'route53']
def __init__(self):
""" Init """
super(RecordSetName, self).__init__()
self.resource_property_types = ['AWS::Route53::RecordSet']
def match_resource_properties(self, properties, _, path, cfn):
matches = []
property_sets = cfn.get_object_without_conditions(properties, ['Name', 'HostedZoneName'])
for property_set in property_sets:
props = property_set.get('Object')
scenario = property_set.get('Scenario')
name = props.get('Name', None)
hz_name = props.get('HostedZoneName', None)
if isinstance(name, six.string_types) and isinstance(hz_name, six.string_types):
if hz_name[-1] != '.':
message = 'HostedZoneName must end in a dot at {}'
if scenario is None:
matches.append(
RuleMatch(path[:] + ['HostedZoneName'], message.format('/'.join(map(str, path)))))
else:
scenario_text = ' and '.join(
['when condition "%s" is %s' % (k, v) for (k, v) in scenario.items()])
matches.append(
RuleMatch(path[:] + ['HostedZoneName'], message.format('/'.join(map(str, path)) + ' ' + scenario_text)))
if hz_name[-1] == '.':
hz_name = hz_name[:-1]
if name[-1] == '.':
name = name[:-1]
if hz_name not in [name, name[-len(hz_name):]]:
                    message = 'HostedZoneName must be a superdomain of Name at {}'
if scenario is None:
matches.append(
RuleMatch(path[:] + ['Name'], message.format('/'.join(map(str, path)))))
else:
scenario_text = ' and '.join(
['when condition "%s" is %s' % (k, v) for (k, v) in scenario.items()])
matches.append(
RuleMatch(path[:] + ['Name'], message.format('/'.join(map(str, path)) + ' ' + scenario_text)))
return matches
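# --- Illustrative sketch, not part of the original rule ---
# The superdomain check above reduces to a suffix comparison once trailing
# dots are stripped; the standalone helper below is a hypothetical restatement
# of that logic.
def _is_superdomain(hosted_zone_name, record_name):
    """Return True if hosted_zone_name is a superdomain of (or equal to) record_name."""
    hz_name = hosted_zone_name[:-1] if hosted_zone_name.endswith('.') else hosted_zone_name
    name = record_name[:-1] if record_name.endswith('.') else record_name
    return hz_name in [name, name[-len(hz_name):]]
# 'example.com.' is a superdomain of 'www.example.com.', but 'other.com.' is not:
assert _is_superdomain('example.com.', 'www.example.com.')
assert not _is_superdomain('other.com.', 'www.example.com.')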
| 48.383333 | 146 | 0.548398 |
4a19212d27217f167b2ec7d67bc4081e6dcaae31 | 7,064 | py | Python |
samples/python/topology/spl/vwap_event_time/vwap_event_time.py | markheger/streamsx.topology | 8118513146399fa6a9490a1debd8037615d7acd1 | ["Apache-2.0"] | 31 | 2015-06-24T06:21:14.000Z | 2020-08-28T21:45:50.000Z |
samples/python/topology/spl/vwap_event_time/vwap_event_time.py | markheger/streamsx.topology | 8118513146399fa6a9490a1debd8037615d7acd1 | ["Apache-2.0"] | 1,203 | 2015-06-15T02:11:49.000Z | 2021-03-22T09:47:54.000Z |
samples/python/topology/spl/vwap_event_time/vwap_event_time.py | markheger/streamsx.topology | 8118513146399fa6a9490a1debd8037615d7acd1 | ["Apache-2.0"] | 53 | 2015-05-28T21:14:16.000Z | 2021-12-23T12:58:59.000Z |
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2020
import os
import streamsx.topology.context as context
from streamsx.topology.topology import Topology
from streamsx.topology.context import submit, ContextTypes,ConfigParams
from streamsx.topology.schema import StreamSchema
import streamsx.spl.op as op
import streamsx.spl.types
import streamsx.standard.files as files
from streamsx.standard import Compression, Format
import streamsx.standard.relational as R
script_dir = os.path.dirname(os.path.realpath(__file__))
def data_source(topo, schema):
input_file = 'TradesAndQuotes.csv.gz'
sample_file = os.path.join(script_dir, input_file)
topo.add_file_dependency(sample_file, 'etc') # add sample file to etc dir in bundle
fn = os.path.join('etc', input_file) # file name relative to application dir
s = topo.source(files.CSVReader(schema=schema, file=fn, compression=Compression.gzip.name))
# add event-time
TQRecTWithEvTime = StreamSchema(schema).extend(StreamSchema('tuple<timestamp evTime>'))
fo = R.Functor.map(s, TQRecTWithEvTime)
fo.evTime = fo.output(fo.outputs[0], op.Expression.expression('timeStringToTimestamp(date, time, false)'))
ev_stream = fo.outputs[0]
ev_stream = ev_stream.set_event_time('evTime')
return ev_stream
def data_sink(stream):
fsink_config = {
'format': Format.txt.name
}
fsink = files.FileSink(file=streamsx.spl.op.Expression.expression('"'+script_dir+'/out.txt"'), **fsink_config)
stream.for_each(fsink)
def main():
"""
This demonstrates the invocation of SPL operators from
the SPL standard toolkit using streamsx.standard package.
Computes the volume-weighted average price (VWAP) for a stream of stock
transactions. Given trades and quotes, this application produces a
    bargain index for a given list of stocks. The VWAP is calculated over
event-time intervals of 10 seconds and is calculated every second.
The average is not recalculated if late trades are received (tuples
are ignored).
The bargain index identifies possible trade opportunities, which occur
when the volume-weighted average price for trades executed within the
last 10 seconds exceeds the current ask price. This algorithm may be
used to identify opportunities to pick up well priced shares in the
stream of current transaction data.
Given an input file containing trades and quotes, this application
produces an output file called `out.txt` in the script's directory.
The output file lists the VWAP and bargain index.
    The bargain index identifies the magnitude of the bargain, where a greater
    value implies a better bargain. A value of 0 indicates that the VWAP is
not greater than the asking price, and is therefore not a bargain.
    A few records from the output file are shown below:
{ticker="BK",vwap=32.51,askprice=32.51,asksize=50,date="27-DEC-2005",time="14:30:17.098",index=0}
{ticker="BK",vwap=32.51,askprice=32.51,asksize=48,date="27-DEC-2005",time="14:30:17.100",index=0}
{ticker="IBM",vwap=83.47991935483871,askprice=83.46,asksize=10,date="27-DEC-2005",time="14:30:17.238",index=10.20119069042564}
{ticker="IBM",vwap=83.47991935483871,askprice=83.46,asksize=10,date="27-DEC-2005",time="14:30:17.238",index=10.20119069042564}
    Each record shows the content of the tuples received on the `bargain_index_stream` stream as a set of tuple attributes. The attributes are formatted as key-value pairs.
    The `vwap` and `index` attributes contain the volume-weighted average price and bargain index, respectively.
Example::
python3 vwap_event_time.py
Output:
Bargain Index reports in file "out.txt"
"""
topo = Topology(name='VwapEventTime')
# schema definitions
TQRecT = 'tuple<rstring ticker,rstring date, rstring time, int32 gmtOffset, rstring ttype, rstring exCntrbID, decimal64 price, decimal64 volume, decimal64 vwap, rstring buyerID, decimal64 bidprice, decimal64 bidsize, int32 numbuyers, rstring sellerID, decimal64 askprice, decimal64 asksize, int32 numsellers, rstring qualifiers, int32 seqno, rstring exchtime, decimal64 blockTrd, decimal64 floorTrd, decimal64 PEratio, decimal64 yield, decimal64 newprice, decimal64 newvol, int32 newseqno, decimal64 bidimpvol, decimal64 askimpcol, decimal64 impvol>'
TradeInfoT = 'tuple<decimal64 price, decimal64 volume, rstring date, rstring time, timestamp evTime, rstring ticker>'
QuoteInfoT = 'tuple<decimal64 bidprice, decimal64 askprice, decimal64 asksize, rstring date, rstring time, timestamp evTime, rstring ticker>'
VwapT = 'tuple<rstring ticker, decimal64 minprice, decimal64 maxprice, decimal64 avgprice, decimal64 vwap, timestamp start, timestamp end>'
BargainIndexT = 'tuple<rstring ticker, decimal64 vwap, decimal64 askprice, decimal64 asksize, rstring date, rstring time, decimal64 index>'
trade_quote_eventtime_stream = data_source(topo, TQRecT)
# split quotes and trades
fq = R.Functor.map(trade_quote_eventtime_stream, StreamSchema(QuoteInfoT), filter='ttype=="Quote" && (ticker in {"BK", "IBM", "ANR"})', name='QuoteFilter')
quotes_stream = fq.outputs[0]
ft = R.Functor.map(trade_quote_eventtime_stream, StreamSchema(TradeInfoT), filter='ttype=="Trade" && (ticker in {"BK", "IBM", "ANR"})', name='TradeFilter')
trades_stream = ft.outputs[0]
# Aggregation over event-time intervals of 10 seconds, calculated every second
w = trades_stream.time_interval(interval_duration=10.0, creation_period=1.0).partition('ticker')
aggregate_schema = StreamSchema(VwapT).extend(StreamSchema('tuple<decimal64 sumvolume>'))
a = R.Aggregate.invoke(w, aggregate_schema, name='PreVwap')
a.vwap = a.sum('price * volume')
a.minprice = a.min('price')
a.maxprice = a.max('price')
a.avgprice = a.average('price')
a.sumvolume = a.sum('volume')
a.start = a.interval_start()
a.end = a.interval_end()
pre_vwap_stream = a.stream
f_vwap = R.Functor.map(pre_vwap_stream, StreamSchema(VwapT), name='Vwap')
f_vwap.vwap = f_vwap.output(f_vwap.outputs[0], 'vwap / sumvolume')
vwap_stream = f_vwap.outputs[0]
    # Join quotes with an event-time up to one second greater than the VWAP time
win_vwap = vwap_stream.last(size=100).partition('ticker')
j = R.Join.lookup(
reference=win_vwap,
reference_key='ticker',
lookup=quotes_stream,
lookup_key='ticker',
schema=BargainIndexT,
match='(Vwap.end <= QuoteFilter.evTime) && (QuoteFilter.evTime < add(Vwap.end, (float64)1.0))',
name='BargainIndex')
j.index = j.output(j.outputs[0], 'vwap > askprice ? asksize * exp(vwap - askprice) : 0d')
bargain_index_stream = j.outputs[0]
# Write data in output file
data_sink(bargain_index_stream)
# Now execute the topology by submitting to a standalone context.
submit(ContextTypes.STANDALONE, topo)
if __name__ == '__main__':
main()
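# --- Illustrative sketch, not part of the original sample ---
# The Aggregate/Functor pair above reduces each event-time window to
# sum(price * volume) / sum(volume); a plain-Python restatement:
def _vwap(trades):
    """trades: iterable of (price, volume) pairs within one window."""
    total_pv = sum(price * volume for price, volume in trades)
    total_volume = sum(volume for _, volume in trades)
    return total_pv / total_volume
# Three trades in a 10-second window: VWAP = (83*10 + 84*20 + 85*10) / 40 = 84.0
assert _vwap([(83, 10), (84, 20), (85, 10)]) == 84.0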
| 50.820144 | 554 | 0.73004 |
4a1921bf3126c915d5a112c662820c243c066207 | 10,190 | py | Python |
tests/ci/metrics_lambda/app.py | mrk-andreev/ClickHouse | a36f05d6b892aa714c02661c87e2c28f2239020d | ["Apache-2.0"] | 8,629 | 2016-06-14T21:03:01.000Z | 2019-09-23T07:46:38.000Z |
tests/ci/metrics_lambda/app.py | mrk-andreev/ClickHouse | a36f05d6b892aa714c02661c87e2c28f2239020d | ["Apache-2.0"] | 4,335 | 2016-06-15T12:58:31.000Z | 2019-09-23T11:18:43.000Z |
tests/ci/metrics_lambda/app.py | mrk-andreev/ClickHouse | a36f05d6b892aa714c02661c87e2c28f2239020d | ["Apache-2.0"] | 1,700 | 2016-06-15T09:25:11.000Z | 2019-09-23T11:16:38.000Z |
#!/usr/bin/env python3
import argparse
import sys
import json
import time
from collections import namedtuple
import jwt
import requests
import boto3
from botocore.exceptions import ClientError
def get_dead_runners_in_ec2(runners):
ids = {
runner.name: runner
for runner in runners
        # Only names like `i-deadbead123` are valid for an instance ID
if runner.offline and not runner.busy and runner.name.startswith("i-")
}
result_to_delete = [
runner
for runner in runners
if not ids.get(runner.name) and runner.offline and not runner.busy
]
if not ids:
return []
client = boto3.client("ec2")
i = 0
inc = 100
print("Checking ids", ids.keys())
instances_statuses = []
while i < len(ids.keys()):
try:
instances_statuses.append(
client.describe_instance_status(
InstanceIds=list(ids.keys())[i : i + inc]
)
)
i += inc
except ClientError as e:
# The list of non-existent instances is in the message:
# The instance IDs 'i-069b1c256c06cf4e3, i-0f26430432b044035,
# i-0faa2ff44edbc147e, i-0eccf2514585045ec, i-0ee4ee53e0daa7d4a,
# i-07928f15acd473bad, i-0eaddda81298f9a85' do not exist
message = e.response["Error"]["Message"]
if message.startswith("The instance IDs '") and message.endswith(
"' do not exist"
):
non_existent = message[18:-14].split(", ")
for n in non_existent:
result_to_delete.append(ids.pop(n))
else:
raise
found_instances = set([])
print("Response", instances_statuses)
for instances_status in instances_statuses:
for instance_status in instances_status["InstanceStatuses"]:
if instance_status["InstanceState"]["Name"] in ("pending", "running"):
found_instances.add(instance_status["InstanceId"])
print("Found instances", found_instances)
for runner in result_to_delete:
print("Instance", runner.name, "is not alive, going to remove it")
for instance_id, runner in ids.items():
if instance_id not in found_instances:
print("Instance", instance_id, "is not alive, going to remove it")
result_to_delete.append(runner)
return result_to_delete
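# --- Illustrative sketch, not part of the original lambda ---
# The ClientError branch above recovers the non-existent instance IDs by
# slicing the error message between a fixed prefix and suffix; a hypothetical
# standalone version of that parsing:
def _parse_missing_instance_ids(message):
    prefix = "The instance IDs '"
    suffix = "' do not exist"
    if message.startswith(prefix) and message.endswith(suffix):
        return message[len(prefix):-len(suffix)].split(", ")
    return []
assert _parse_missing_instance_ids(
    "The instance IDs 'i-069b1c256c06cf4e3, i-0f26430432b044035' do not exist"
) == ["i-069b1c256c06cf4e3", "i-0f26430432b044035"]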
def get_key_and_app_from_aws():
import boto3
secret_name = "clickhouse_github_secret_key"
session = boto3.session.Session()
client = session.client(
service_name="secretsmanager",
)
get_secret_value_response = client.get_secret_value(SecretId=secret_name)
data = json.loads(get_secret_value_response["SecretString"])
return data["clickhouse-app-key"], int(data["clickhouse-app-id"])
def handler(event, context):
private_key, app_id = get_key_and_app_from_aws()
main(private_key, app_id, True, True)
def get_installation_id(jwt_token):
headers = {
"Authorization": f"Bearer {jwt_token}",
"Accept": "application/vnd.github.v3+json",
}
response = requests.get("https://api.github.com/app/installations", headers=headers)
response.raise_for_status()
data = response.json()
for installation in data:
if installation["account"]["login"] == "ClickHouse":
installation_id = installation["id"]
return installation_id
def get_access_token(jwt_token, installation_id):
headers = {
"Authorization": f"Bearer {jwt_token}",
"Accept": "application/vnd.github.v3+json",
}
response = requests.post(
f"https://api.github.com/app/installations/{installation_id}/access_tokens",
headers=headers,
)
response.raise_for_status()
data = response.json()
return data["token"]
RunnerDescription = namedtuple(
"RunnerDescription", ["id", "name", "tags", "offline", "busy"]
)
def list_runners(access_token):
headers = {
"Authorization": f"token {access_token}",
"Accept": "application/vnd.github.v3+json",
}
response = requests.get(
"https://api.github.com/orgs/ClickHouse/actions/runners?per_page=100",
headers=headers,
)
response.raise_for_status()
data = response.json()
total_runners = data["total_count"]
runners = data["runners"]
total_pages = int(total_runners / 100 + 1)
print("Total pages", total_pages)
for i in range(2, total_pages + 1):
response = requests.get(
"https://api.github.com/orgs/ClickHouse/actions/runners"
f"?page={i}&per_page=100",
headers=headers,
)
response.raise_for_status()
data = response.json()
runners += data["runners"]
print("Total runners", len(runners))
result = []
for runner in runners:
tags = [tag["name"] for tag in runner["labels"]]
desc = RunnerDescription(
id=runner["id"],
name=runner["name"],
tags=tags,
offline=runner["status"] == "offline",
busy=runner["busy"],
)
result.append(desc)
return result
def group_runners_by_tag(listed_runners):
result = {}
RUNNER_TYPE_LABELS = [
"builder",
"func-tester",
"func-tester-aarch64",
"fuzzer-unit-tester",
"stress-tester",
"style-checker",
"style-checker-aarch64",
]
for runner in listed_runners:
for tag in runner.tags:
if tag in RUNNER_TYPE_LABELS:
if tag not in result:
result[tag] = []
result[tag].append(runner)
break
else:
if "unlabeled" not in result:
result["unlabeled"] = []
result["unlabeled"].append(runner)
return result
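# --- Illustrative sketch, not part of the original lambda ---
# group_runners_by_tag buckets each runner under the first known runner-type
# label it carries, and everything else under "unlabeled"; the runners below
# are hypothetical and the helper is never called.
def _example_grouping():
    runners = [
        RunnerDescription(id=1, name="i-0aaa", tags=["builder", "spot"], offline=False, busy=True),
        RunnerDescription(id=2, name="i-0bbb", tags=["something-else"], offline=True, busy=False),
    ]
    grouped = group_runners_by_tag(runners)
    assert set(grouped) == {"builder", "unlabeled"}
    assert grouped["builder"][0].id == 1 and grouped["unlabeled"][0].id == 2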
def push_metrics_to_cloudwatch(listed_runners, namespace):
client = boto3.client("cloudwatch")
metrics_data = []
busy_runners = sum(
1 for runner in listed_runners if runner.busy and not runner.offline
)
metrics_data.append(
{
"MetricName": "BusyRunners",
"Value": busy_runners,
"Unit": "Count",
}
)
total_active_runners = sum(1 for runner in listed_runners if not runner.offline)
metrics_data.append(
{
"MetricName": "ActiveRunners",
"Value": total_active_runners,
"Unit": "Count",
}
)
total_runners = len(listed_runners)
metrics_data.append(
{
"MetricName": "TotalRunners",
"Value": total_runners,
"Unit": "Count",
}
)
if total_active_runners == 0:
busy_ratio = 100
else:
busy_ratio = busy_runners / total_active_runners * 100
metrics_data.append(
{
"MetricName": "BusyRunnersRatio",
"Value": busy_ratio,
"Unit": "Percent",
}
)
client.put_metric_data(Namespace=namespace, MetricData=metrics_data)
def delete_runner(access_token, runner):
headers = {
"Authorization": f"token {access_token}",
"Accept": "application/vnd.github.v3+json",
}
response = requests.delete(
f"https://api.github.com/orgs/ClickHouse/actions/runners/{runner.id}",
headers=headers,
)
response.raise_for_status()
print(f"Response code deleting {runner.name} is {response.status_code}")
return response.status_code == 204
def main(github_secret_key, github_app_id, push_to_cloudwatch, delete_offline_runners):
payload = {
"iat": int(time.time()) - 60,
"exp": int(time.time()) + (10 * 60),
"iss": github_app_id,
}
encoded_jwt = jwt.encode(payload, github_secret_key, algorithm="RS256")
installation_id = get_installation_id(encoded_jwt)
access_token = get_access_token(encoded_jwt, installation_id)
runners = list_runners(access_token)
grouped_runners = group_runners_by_tag(runners)
for group, group_runners in grouped_runners.items():
if push_to_cloudwatch:
print(group)
push_metrics_to_cloudwatch(group_runners, "RunnersMetrics/" + group)
else:
print(group, f"({len(group_runners)})")
for runner in group_runners:
print("\t", runner)
if delete_offline_runners:
print("Going to delete offline runners")
dead_runners = get_dead_runners_in_ec2(runners)
for runner in dead_runners:
print("Deleting runner", runner)
delete_runner(access_token, runner)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get list of runners and their states")
parser.add_argument(
"-p", "--private-key-path", help="Path to file with private key"
)
parser.add_argument("-k", "--private-key", help="Private key")
parser.add_argument(
"-a", "--app-id", type=int, help="GitHub application ID", required=True
)
parser.add_argument(
"--push-to-cloudwatch",
action="store_true",
help="Push metrics for active and busy runners to cloudwatch",
)
parser.add_argument(
"--delete-offline", action="store_true", help="Remove offline runners"
)
args = parser.parse_args()
if not args.private_key_path and not args.private_key:
print(
"Either --private-key-path or --private-key must be specified",
file=sys.stderr,
)
if args.private_key_path and args.private_key:
print(
"Either --private-key-path or --private-key must be specified",
file=sys.stderr,
)
if args.private_key:
private_key = args.private_key
elif args.private_key_path:
with open(args.private_key_path, "r") as key_file:
private_key = key_file.read()
else:
print("Attempt to get key and id from AWS secret manager")
private_key, args.app_id = get_key_and_app_from_aws()
main(private_key, args.app_id, args.push_to_cloudwatch, args.delete_offline)
| 31.16208 | 88 | 0.616683 |
4a1922b0b5a2a075f6099e9cff40b3e7708bef07 | 2,275 | py | Python |
models/public/higher-hrnet-w32-human-pose-estimation/model.py | TolyaTalamanov/open_model_zoo | 1697e60712df4ca72635a2080a197b9d3bc24129 | ["Apache-2.0"] | 2,201 | 2018-10-15T14:37:19.000Z | 2020-07-16T02:05:51.000Z |
models/public/higher-hrnet-w32-human-pose-estimation/model.py | Pandinosaurus/open_model_zoo | 2543996541346418919c5cddfb71e33e2cdef080 | ["Apache-2.0"] | 759 | 2018-10-18T07:43:55.000Z | 2020-07-16T01:23:12.000Z |
models/public/higher-hrnet-w32-human-pose-estimation/model.py | Pandinosaurus/open_model_zoo | 2543996541346418919c5cddfb71e33e2cdef080 | ["Apache-2.0"] | 808 | 2018-10-16T14:03:49.000Z | 2020-07-15T11:41:45.000Z |
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from config import cfg
import models
class HpeHRNet(torch.nn.Module):
def __init__(self, cfg, weights):
super().__init__()
self.impl = models.pose_higher_hrnet.PoseHigherResolutionNet(cfg)
checkpoint = torch.load(weights, map_location='cpu')
self.impl.load_state_dict(checkpoint)
self.impl.eval()
# pooling operation to get nms_heatmaps from heatmaps out of model
self.pool = torch.nn.MaxPool2d(kernel_size=5, stride=1, padding=2)
# ReLU operation to avoid negative values at heatmap
self.relu = torch.nn.ReLU()
def forward(self, image):
outputs = self.impl(image)
# output[0] - heatmaps_lr_and_embeddings out with size [1, 34, h/4, w/4]
        # output[1] - heatmaps out with size [1, 17, h/2, w/2]
# resize low-resolution heatmaps and embeddings (outputs[0]) to heatmaps shape (output[1])
outputs[0] = torch.nn.functional.interpolate(
outputs[0],
size=(outputs[-1].size(2), outputs[-1].size(3)),
mode='bilinear',
align_corners=False
)
# average of heatmaps and apply relu
outputs[1] = (outputs[0][:, :17, :, :] + outputs[1]) / 2
outputs[1] = self.relu(outputs[1])
outputs[0] = outputs[0][:, 17:, :, :]
# apply nms for heatmaps
pooled = self.pool(outputs[1])
mask = torch.eq(pooled, outputs[1]).float()
mask = mask * 2 - 1
outputs[1] *= mask
return outputs
def get_net(file_config, weights):
cfg.defrost()
cfg.merge_from_file(file_config)
model = HpeHRNet(cfg, weights)
return model
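# --- Illustrative sketch, not part of the original wrapper ---
# The max-pool / equality trick in forward() acts as a local NMS on the
# heatmaps: only local maxima keep a positive value, every other location is
# flipped negative by the {-1, +1} mask.  A 3x3 toy heatmap (kernel size 3
# here instead of 5) illustrates this; the helper is hypothetical.
def _example_heatmap_nms():
    heatmap = torch.tensor([[[[0.1, 0.9, 0.2],
                              [0.3, 0.4, 0.8],
                              [0.0, 0.2, 0.1]]]])
    pool = torch.nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
    mask = torch.eq(pool(heatmap), heatmap).float() * 2 - 1
    suppressed = heatmap * mask
    # Only the 0.9 peak stays positive; 0.8 is suppressed because 0.9 lies
    # inside its pooling window.
    assert (suppressed > 0).nonzero().tolist() == [[0, 0, 0, 1]]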
| 37.295082 | 98 | 0.643956 |
4a19242558da7d4b554e5d79860b427096f1b398 | 1,283 | py | Python |
matrix_multiplication_complex/matrix_small_d.py | parthrao/PySpark-Projects-MachineLearning | 0cbcee5cd1c7a1eb4299b14386e25d4adff4dd72 | ["Unlicense"] | null | null | null |
matrix_multiplication_complex/matrix_small_d.py | parthrao/PySpark-Projects-MachineLearning | 0cbcee5cd1c7a1eb4299b14386e25d4adff4dd72 | ["Unlicense"] | null | null | null |
matrix_multiplication_complex/matrix_small_d.py | parthrao/PySpark-Projects-MachineLearning | 0cbcee5cd1c7a1eb4299b14386e25d4adff4dd72 | ["Unlicense"] | null | null | null |
from __future__ import print_function
from pyspark import SparkContext
import sys
def mapper(w):
arr = w.split(" ")
return (arr[0], int(broadcastVectorSmall.value[int(arr[1])-1].split(" ")[1]) * int(arr[2]))
if __name__ == "__main__":
if len(sys.argv) != 5:
print("Usage: matrix_small_c.py vectorfile matrixfile outputfile numberofpartition", file=sys.stderr)
exit(-1)
sc = SparkContext(appName="MyTestJob")
matrix_vector_small= []
for x in range(int(sys.argv[4])):
matrix_small = sc.textFile(sys.argv[2] + "/matrix_small"+str(x)+".txt")
vector_small = sc.textFile(sys.argv[1]+"/vector_small"+str(x)+".txt")
vector = sc.broadcast(sorted(vector_small.collect(),key=lambda val: int(val.split( )[0])))
# vector = [1,1]
rddval = matrix_small.map(lambda w: (w.split(" ")[0],int(vector.value[int(w.split(" ")[1])-1].split(" ")[1]) * int(w.split(" ")[2]))).reduceByKey(lambda x,y: int(x)+int(y))
matrix_vector_small.append(rddval)
final_rdd = matrix_vector_small[0]
for x in range (int(sys.argv[4]) - 1):
final_rdd = final_rdd.union( matrix_vector_small[x+1])
final = final_rdd.reduceByKey(lambda x,y: int(x) + int(y))
final.saveAsTextFile(sys.argv[3])
sc.stop()
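# --- Illustrative sketch, not part of the original job ---
# Each block above performs a sparse matrix-vector product: a matrix entry
# "row col value" contributes value * vector[col] to result[row], and
# reduceByKey sums the contributions per row.  A plain-Python restatement:
def _block_matvec(matrix_entries, vector):
    """matrix_entries: iterable of (row, col, value); vector: dict col -> value."""
    result = {}
    for row, col, value in matrix_entries:
        result[row] = result.get(row, 0) + value * vector[col]
    return result
# [[1, 2], [3, 4]] times [10, 20] gives [50, 110]:
assert _block_matvec([(1, 1, 1), (1, 2, 2), (2, 1, 3), (2, 2, 4)], {1: 10, 2: 20}) == {1: 50, 2: 110}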
| 41.387097 | 180 | 0.638348 |
4a192548320066966389eb1edefc4c7d50946b71 | 39,902 | py | Python |
jax/interpreters/xla.py | vballoli/jax | bbf7a432e86053024419ec8adb90aae3d06afb18 | ["ECL-2.0", "Apache-2.0"] | 2 | 2020-04-11T19:27:34.000Z | 2020-07-26T00:31:53.000Z |
jax/interpreters/xla.py | lhz1029/jax | a3cc9a7d327f46292d1edc5fcd2d0d771adc2bb9 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
jax/interpreters/xla.py | lhz1029/jax | a3cc9a7d327f46292d1edc5fcd2d0d771adc2bb9 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import itertools as it
import operator as op
from typing import Any, Callable, Dict, Sequence, Type
from absl import logging
import numpy as onp
from ..config import flags, bool_env
from .. import core
from .. import ad_util
from .. import dtypes
from .. import lazy
from .. import linear_util as lu
from ..abstract_arrays import (ConcreteArray, ShapedArray, AbstractToken,
make_shaped_array, array_types, raise_to_shaped,
abstract_token)
from ..core import Literal, pp_eqn_compact
from ..pprint_util import pp
from ..util import (partial, partialmethod, cache, safe_map, prod, unzip2,
memoize, extend_name_stack, wrap_name)
from ..lib import xla_bridge as xb
from ..lib import xla_client as xc
from . import partial_eval as pe
from . import ad
from . import masking
FLAGS = flags.FLAGS
flags.DEFINE_bool('jax_debug_nans',
bool_env('JAX_DEBUG_NANS', False),
'Add nan checks to every operation.')
flags.DEFINE_bool('jax_log_compiles',
bool_env('JAX_LOG_COMPILES', False),
'Print a message each time a `jit` computation is compiled.')
def _map(f, *xs): return tuple(map(f, *xs))
def identity(x): return x
# unit representation
def _make_unit(c): return c.Constant(onp.zeros((), dtype=onp.dtype('bool')))
def _make_abstract_unit(_): return xc.Shape.array_shape(onp.dtype('bool'), ())
def _device_put_unit(_, device):
return xc.Buffer.from_pyval(onp.zeros((), dtype=onp.dtype('bool')), device,
backend=xb.get_device_backend(device))
def _make_array_shape(a):
return xc.Shape.array_shape(a.dtype, a.shape)
### handlers
xb.register_constant_handler(core.Unit, lambda c, *_: _make_unit(c))
def aval_to_xla_shape(aval):
try:
return xla_shape_handlers[type(aval)](aval)
except KeyError as err:
raise TypeError("No xla_shape_handler for type: {}".format(type(aval))
) from err
xla_shape_handlers: Dict[Type[core.AbstractValue], Callable] = {}
xla_shape_handlers[core.AbstractUnit] = _make_abstract_unit
xla_shape_handlers[ShapedArray] = _make_array_shape
xla_shape_handlers[ConcreteArray] = _make_array_shape
def aval_to_result_handler(device, aval):
try:
return xla_result_handlers[type(aval)](device, aval)
except KeyError as err:
raise TypeError("No xla_result_handler for type: {}".format(type(aval))
) from err
xla_result_handlers: Dict[Type[core.AbstractValue], Callable[..., Callable]] = {}
xla_result_handlers[core.AbstractUnit] = lambda _, __: lambda _: core.unit
def array_result_handler(device, aval):
return partial(DeviceArray, raise_to_shaped(aval), device, lazy.array(aval.shape))
xla_result_handlers[ShapedArray] = array_result_handler
xla_result_handlers[ConcreteArray] = array_result_handler
def device_put(x, device=None):
x = canonicalize_dtype(x)
try:
return device_put_handlers[type(x)](x, device)
except KeyError as err:
raise TypeError("No device_put handler for type: {}".format(type(x))
) from err
device_put_handlers: Dict[Any, Callable] = {}
device_put_handlers[core.Unit] = _device_put_unit
def _device_put_array(x, device):
return xc.Buffer.from_pyval(x, device, backend=xb.get_device_backend(device))
for _t in array_types:
device_put_handlers[_t] = _device_put_array
def _device_put_scalar(x, device):
return xc.Buffer.from_pyval(dtypes.coerce_to_array(x), device,
backend=xb.get_device_backend(device))
for _t in dtypes.python_scalar_dtypes.keys():
  device_put_handlers[_t] = _device_put_scalar
# TODO(mattjj): try to remove this canonicalize_dtype stuff
def canonicalize_dtype(x):
typ = type(x)
handler = canonicalize_dtype_handlers.get(typ)
if handler: return handler(x)
for typ in typ.mro():
handler = canonicalize_dtype_handlers.get(typ)
if handler: return handler(x)
raise TypeError("No canonicalize_dtype handler for type: {}".format(type(x)))
canonicalize_dtype_handlers: Dict[Any, Callable] = {}
canonicalize_dtype_handlers[core.Unit] = identity
def _canonicalize_ndarray_dtype(x):
return onp.asarray(x, dtypes.canonicalize_dtype(dtypes.result_type(x)))
for _t in array_types:
canonicalize_dtype_handlers[_t] = _canonicalize_ndarray_dtype
def _canonicalize_python_scalar_dtype(typ, x):
return onp.asarray(
x, dtypes.canonicalize_dtype(dtypes.python_scalar_dtypes[typ]))
for _t in dtypes.python_scalar_dtypes.keys():
canonicalize_dtype_handlers[_t] = partial(_canonicalize_python_scalar_dtype, _t)
def abstractify(x) -> core.AbstractValue:
typ = type(x)
aval_fn = pytype_aval_mappings.get(typ)
if aval_fn: return aval_fn(x)
for typ in typ.mro():
aval_fn = pytype_aval_mappings.get(typ)
if aval_fn: return aval_fn(x)
raise TypeError("No abstraction handler for type: {}".format(type(x)))
pytype_aval_mappings: Dict[Any, Callable[[Any], core.AbstractValue]] = {}
pytype_aval_mappings[core.Unit] = lambda _: core.abstract_unit
for _t in array_types:
pytype_aval_mappings[_t] = make_shaped_array
def _make_abstract_python_scalar(typ, _):
return ShapedArray((), dtypes.python_scalar_dtypes[typ], weak_type=True)
for _t in dtypes.python_scalar_dtypes.keys():
pytype_aval_mappings[_t] = partial(_make_abstract_python_scalar, _t)
### op-by-op execution
def arg_spec(x):
aval = abstractify(x)
try:
return aval, x._device
except:
return aval, None
def apply_primitive(prim, *args, **params):
"""Impl rule that compiles and runs a single primitive 'prim' using XLA."""
compiled_fun = xla_primitive_callable(prim, *map(arg_spec, args), **params)
return compiled_fun(*args)
@cache()
def xla_primitive_callable(prim, *arg_specs, **params):
avals, arg_devices = unzip2(arg_specs)
device = _device_from_arg_devices(arg_devices)
backend = xb.get_device_backend(device)
aval_out = prim.abstract_eval(*avals, **params)
if not prim.multiple_results:
handle_result = aval_to_result_handler(device, aval_out)
else:
handlers = tuple(map(partial(aval_to_result_handler, device), aval_out))
handle_result = lambda xs: tuple(h(x) for h, x in zip(handlers, xs))
tuple_args = len(avals) > 100
if prim in initial_style_translations:
nreps = initial_style_primitive_replicas(params)
else:
nreps = 1
if nreps > xb.device_count(backend):
msg = ("compiling a primitive computation `{}` that requires {} replicas, "
"but only {} XLA devices are available on backend {}.")
raise ValueError(msg.format(prim, nreps, xb.device_count(backend),
backend.platform))
built_c = primitive_computation(prim, AxisEnv(nreps), backend, tuple_args,
*avals, **params)
options = xb.get_compile_options(
num_replicas=nreps,
num_partitions=1,
device_assignment=device and (device.id,))
options.tuple_arguments = tuple_args
compiled = built_c.Compile(compile_options=options, backend=backend)
if nreps == 1:
return partial(_execute_compiled_primitive, prim, compiled, backend,
handle_result)
else:
return partial(_execute_replicated_primitive, prim, compiled, backend,
handle_result)
def _device_from_arg_devices(devices):
"""Given devices of inputs, determine where to perform a computation.
Args:
    devices: list where each element is either a `Device` instance or `None`.
Returns:
A `Device` instance or None.
Raises:
ValueError if input devices are inconsistent.
"""
try:
device, = set(d for d in devices if d is not None) or (None,)
return device
except ValueError as err:
msg = "primitive arguments must be colocated on the same device, got {}"
raise ValueError(msg.format(", ".join(map(str, devices)))) from err
@cache()
def primitive_computation(prim, axis_env, backend, tuple_args, *avals, **params):
c = xb.make_computation_builder("primitive_computation_{}".format(prim.name))
c.SetOpMetadata(xc.OpMetadata(
op_type=prim.name,
op_name=str(pp_eqn_compact(prim.name, params))))
platform = xb.get_backend(backend).platform
xla_args = _xla_callable_args(c, avals, tuple_args)
# return val always set as a side-effect on c
if prim in backend_specific_translations[platform]:
rule = backend_specific_translations[platform][prim]
rule(c, *xla_args, **params)
elif prim in translations:
rule = translations[prim]
rule(c, *xla_args, **params)
elif prim in initial_style_translations:
rule = initial_style_translations[prim]
rule(c, axis_env, extend_name_stack(prim.name), avals, backend,
*xla_args, **params)
else:
raise NotImplementedError("XLA translation rule for {} not found".format(prim))
c.ClearOpMetadata()
try:
return c.Build()
except RuntimeError as e:
msg = (" ".join(map(str, e.args)) + "\n"
"This is a bug in JAX's shape-checking rules; please report it!\n"
"https://github.com/google/jax/issues\n")
raise RuntimeError(msg) from e
def primitive_subcomputation(prim, *avals, **params):
return primitive_computation(prim, AxisEnv(1), None, False, *avals, **params)
def _execute_compiled_primitive(prim, compiled, backend, result_handler, *args):
device, = compiled.local_devices()
input_bufs = [device_put(x, device) for x in args if x is not token]
out_bufs = compiled.Execute(input_bufs)
if FLAGS.jax_debug_nans:
check_nans(prim, out_bufs)
return result_handler(out_bufs if prim.multiple_results else out_bufs[0])
def _execute_replicated_primitive(prim, compiled, backend, result_handler,
*args):
input_bufs = [
[device_put(x, device) for x in args if x is not token]
for device in compiled.local_devices()]
out_buf = compiled.ExecuteOnLocalDevices(input_bufs)[0]
if not prim.multiple_results:
out_buf, = out_buf
return result_handler(out_buf)
def check_nans(prim, bufs):
for buf in bufs:
_check_nans(prim.name, buf.shape(), buf)
def _check_nans(name, xla_shape, buf):
assert not xla_shape.is_tuple()
if dtypes.issubdtype(xla_shape.element_type(), onp.inexact):
if onp.any(onp.isnan(buf.to_py())):
msg = "invalid value (nan) encountered in {}"
raise FloatingPointError(msg.format(name))
### compiling jaxprs
def prefetch(x):
if isinstance(x, DeviceArray):
x.copy_to_host_async()
return x
def jaxpr_literals(jaxpr):
"""Generates all the literals inside a jaxpr, including nested subjaxprs."""
for eqn in jaxpr.eqns:
for v in eqn.invars:
if type(v) is core.Literal:
yield v.val
for subjaxpr in core.subjaxprs(jaxpr):
yield from jaxpr_literals(subjaxpr)
def jaxpr_subcomp(c, jaxpr, backend, axis_env, consts, name_stack, *args):
platform = xb.get_backend(backend).platform
def read(v):
if type(v) is Literal:
return c.Constant(canonicalize_dtype(v.val))
else:
return env[v]
def aval(v):
if type(v) is Literal:
return abstractify(v.val)
else:
return v.aval
def write(v, node):
assert node is not None
env[v] = node
env = {}
write(core.unitvar, _make_unit(c))
_map(write, jaxpr.constvars, consts)
_map(write, jaxpr.invars, args)
for eqn in jaxpr.eqns:
c.SetOpMetadata(xc.OpMetadata(
op_type=eqn.primitive.name,
op_name=str(pp(name_stack) >> pp_eqn_compact(
eqn.primitive.name, eqn.params))))
in_nodes = list(map(read, eqn.invars))
if eqn.primitive in backend_specific_translations[platform]:
rule = backend_specific_translations[platform][eqn.primitive]
ans = rule(c, *in_nodes, **eqn.params)
elif eqn.primitive in translations:
ans = translations[eqn.primitive](c, *in_nodes, **eqn.params)
elif eqn.primitive in initial_style_translations:
new_params = check_backend_params(eqn.params, backend)
rule = initial_style_translations[eqn.primitive]
ans = rule(c, axis_env, extend_name_stack(name_stack, eqn.primitive.name),
map(aval, eqn.invars), backend, *in_nodes, **new_params)
elif eqn.primitive in parallel_translations:
replica_groups = axis_groups(axis_env, eqn.params['axis_name'])
new_params = {k: v for k, v in eqn.params.items() if k != 'axis_name'}
rule = parallel_translations[eqn.primitive]
ans = rule(c, *in_nodes, replica_groups=replica_groups, platform=platform,
**new_params)
elif eqn.primitive in call_translations:
new_params = check_backend_params(eqn.params, backend)
rule = call_translations[eqn.primitive]
ans = rule(c, axis_env, in_nodes,
name_stack, backend=backend, **new_params)
else:
msg = "XLA translation rule for primitive '{}' not found"
raise NotImplementedError(msg.format(eqn.primitive.name))
c.GetShape(ans) # force xla to do shape error checking
out_nodes = xla_destructure(c, ans) if eqn.primitive.multiple_results else [ans]
c.ClearOpMetadata()
_map(write, eqn.outvars, out_nodes)
return _map(read, jaxpr.outvars)
def xla_destructure(c, ans):
num_elements = len(c.GetShape(ans).tuple_shapes())
return [c.GetTupleElement(ans, i) for i in range(num_elements)]
def check_backend_params(params, outer_backend):
# For nested calls, the outermost call sets the backend for all inner calls;
# it's an error if the inner call has a conflicting explicit backend spec.
inner_backend = params.get('backend', None)
if inner_backend and inner_backend != outer_backend:
msg = (
"Outer-jit backend specification {} must match explicit inner-jit "
"backend specification {}.")
raise ValueError(msg.format(outer_backend, inner_backend))
return {k: params[k] for k in params if k != 'backend'}
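# --- Illustrative sketch, not part of the original module ---
# A matching inner backend is simply stripped from the params, while a
# conflicting explicit inner backend raises; the extra param below is
# hypothetical and the helper is never called.
def _example_check_backend_params():
  assert check_backend_params({'backend': 'cpu', 'name': 'f'}, 'cpu') == {'name': 'f'}
  try:
    check_backend_params({'backend': 'gpu', 'name': 'f'}, 'cpu')
    assert False, "expected a ValueError for the conflicting inner backend"
  except ValueError:
    pass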
class AxisEnv(object):
def __init__(self, nreps, names=(), sizes=(), devices=None):
assert isinstance(names, tuple)
assert isinstance(sizes, tuple)
self.nreps = nreps
self.names = names
self.sizes = sizes
self.devices = devices
def extend_axis_env(env, name, size):
return AxisEnv(env.nreps, env.names + (name,), env.sizes + (size,), env.devices)
def axis_read(axis_env, axis_name):
return max(i for i, name in enumerate(axis_env.names) if name == axis_name)
def axis_groups(axis_env, name):
if isinstance(name, (list, tuple)):
mesh_axes = tuple(map(partial(axis_read, axis_env), name))
else:
mesh_axes = (axis_read(axis_env, name),)
return _axis_groups(axis_env.nreps, axis_env.sizes, mesh_axes)
def _axis_groups(nrep, mesh_spec, mesh_axes):
trailing_size, ragged = divmod(nrep, prod(mesh_spec))
assert not ragged
full_spec = list(mesh_spec) + [trailing_size]
iota = onp.arange(prod(full_spec)).reshape(full_spec)
groups = onp.reshape(
onp.moveaxis(iota, mesh_axes, onp.arange(len(mesh_axes))),
(prod(onp.take(full_spec, mesh_axes)), -1))
return tuple(map(tuple, groups.T))
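# --- Illustrative sketch, not part of the original module ---
# With 8 replicas on a (2, 2) mesh (leaving a trailing factor of 2), grouping
# over the first mesh axis pairs replicas that differ only in that axis.  The
# hypothetical self-check below is never called:
def _example_axis_groups():
  assert _axis_groups(8, (2, 2), (0,)) == ((0, 4), (1, 5), (2, 6), (3, 7))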
def jaxpr_replicas(jaxpr):
"""The number of replicas needed for a jaxpr.
  For an eqn, multiply the `axis_size` with the `jaxpr_replicas` of the
subjaxprs. For a list of eqns, take the maximum number of replicas.
"""
return max(it.chain([1], (eqn_replicas(eqn) for eqn in jaxpr.eqns)))
# TODO(mattjj): this function assumes that only pmap has a parameter named
# axis_size, and that it corresponds to cross-replica mapping
def eqn_replicas(eqn):
call_jaxpr = eqn.params.get("call_jaxpr")
if call_jaxpr:
return eqn.params.get('axis_size', 1) * jaxpr_replicas(call_jaxpr)
elif eqn.primitive in initial_style_translations:
return initial_style_primitive_replicas(eqn.params)
else:
return 1
def initial_style_primitive_replicas(params):
nums = (jaxpr_replicas(param if type(param) is core.Jaxpr else param.jaxpr)
for param in params.values()
if type(param) in (core.Jaxpr, core.TypedJaxpr))
return max(it.chain([1], nums))
# TODO(mattjj,skyewm): the functions here are utilities for checking if
# not-yet-supported features are used with multi-host programming
def jaxpr_has_pmap(jaxpr):
"""Whether there is an xla_pmap primitive anywhere inside a Jaxpr."""
for eqn in jaxpr.eqns:
if 'xla_pmap' in eqn.primitive.name:
return True
for subjaxpr in core.subjaxprs(jaxpr):
if jaxpr_has_pmap(subjaxpr):
return True
return False
def jaxpr_collectives(jaxpr):
"""Generates all the collective primitives anywhere inside a Jaxpr."""
for eqn in jaxpr.eqns:
if eqn.primitive in parallel_translations:
yield eqn.primitive
for subjaxpr in core.subjaxprs(jaxpr):
yield from jaxpr_collectives(subjaxpr)
### xla_call underlying jit
def _xla_call_impl(fun: lu.WrappedFun, *args, device, backend, name):
compiled_fun = _xla_callable(fun, device, backend, name, *map(arg_spec, args))
try:
return compiled_fun(*args)
except FloatingPointError:
print("Invalid value encountered in the output of a jit function. "
"Calling the de-optimized version.")
return fun.call_wrapped(*args) # probably won't return
@lu.cache
def _xla_callable(fun: lu.WrappedFun, device, backend, name, *arg_specs):
if device is not None and backend is not None:
raise ValueError("can't specify both a device and a backend for jit, "
"got device={} and backend={}".format(device, backend))
abstract_args, arg_devices = unzip2(arg_specs)
pvals: Sequence[pe.PartialVal] = [pe.PartialVal.unknown(aval) for aval in abstract_args]
jaxpr, pvals, consts = pe.trace_to_jaxpr(
fun, pvals, instantiate=False, stage_out=True, bottom=True)
_map(prefetch, it.chain(consts, jaxpr_literals(jaxpr)))
nreps = jaxpr_replicas(jaxpr)
device = _xla_callable_device(nreps, backend, device, arg_devices)
result_handlers = tuple(map(partial(_pval_to_result_handler, device), pvals))
# Computations that only produce constants and/or only rearrange their inputs,
# which are often produced from partial evaluation, don't need compilation,
# and don't need to force their (potentially lazy) arguments.
if not jaxpr.eqns:
device = device or xb.get_backend(None).get_default_device_assignment(1)[0]
return partial(_execute_trivial, jaxpr, device, consts, result_handlers)
log_priority = logging.WARNING if FLAGS.jax_log_compiles else logging.DEBUG
logging.log(log_priority,
"Compiling {} for args {}.".format(fun.__name__, abstract_args))
if nreps > xb.device_count(backend):
msg = ("compiling computation that requires {} replicas, but only {} XLA "
"devices are available")
raise ValueError(msg.format(nreps, xb.device_count(backend)))
if xb.host_count() > 1 and (nreps > 1 or jaxpr_has_pmap(jaxpr)):
raise NotImplementedError(
"jit of multi-host pmap not implemented (and jit-of-pmap can cause "
"extra data movement anyway, so maybe you don't want it after all).")
tuple_args = len(abstract_args) > 100 # pass long arg lists as tuple for TPU
c = xb.make_computation_builder("jit_{}".format(fun.__name__))
xla_consts = _map(c.Constant, consts)
xla_args = _xla_callable_args(c, abstract_args, tuple_args)
out_nodes = jaxpr_subcomp(
c, jaxpr, backend, AxisEnv(nreps, (), ()), xla_consts,
extend_name_stack(wrap_name(name, 'jit')), *xla_args)
built = c.Build(c.Tuple(*out_nodes))
options = xb.get_compile_options(
num_replicas=nreps,
num_partitions=1,
device_assignment=(device.id,) if device else None)
options.tuple_arguments = tuple_args
compiled = built.Compile(compile_options=options, backend=xb.get_backend(backend))
if nreps == 1:
return partial(_execute_compiled, compiled, backend, result_handlers)
else:
return partial(_execute_replicated, compiled, backend, result_handlers)
def _xla_callable_device(nreps, backend, device, arg_devices):
if nreps > 1:
if device is not None or backend is not None:
raise ValueError("can't specify device or backend for jit-of-pmap, "
"got device={} and backend={}".format(device, backend))
return None
else:
if device is None and backend is None:
return _device_from_arg_devices(arg_devices)
elif device is not None and backend is None:
return device
elif device is None and backend is not None:
return xb.get_backend(backend).get_default_device_assignment(1)[0]
else:
assert False # Unreachable given the error check in _xla_callable
def _xla_callable_args(c, avals, tuple_args):
if not tuple_args:
xla_args = [c.ParameterWithShape(aval_to_xla_shape(a))
if a is not abstract_token else c.CreateToken() for a in avals]
return xla_args
else:
tuple_param = c.ParameterWithShape(xc.Shape.tuple_shape(
[aval_to_xla_shape(a) for a in avals if a is not abstract_token]))
xla_inputs = iter(xla_destructure(c, tuple_param))
xla_args = [next(xla_inputs) if a is not abstract_token else c.CreateToken()
for a in avals]
assert next(xla_inputs, None) is None
return xla_args
def _pval_to_result_handler(device, pval):
pv, const = pval
if pv is None:
const = _device_put_impl(const, device) if device else const
return lambda _: const
else:
return aval_to_result_handler(device, pv)
def _execute_compiled(compiled, backend, handlers, *args):
device, = compiled.local_devices()
input_bufs = [device_put(x, device) for x in args if x is not token]
out_bufs = compiled.Execute(input_bufs)
if FLAGS.jax_debug_nans: check_nans(xla_call_p, out_bufs)
return [handler(out_buf) for handler, out_buf in zip(handlers, out_bufs)]
def _execute_replicated(compiled, backend, handlers, *args):
input_bufs = [
[device_put(x, device) for x in args if x is not token]
for device in compiled.local_devices()]
out_bufs = compiled.ExecuteOnLocalDevices(input_bufs)[0]
if FLAGS.jax_debug_nans: check_nans(xla_call_p, out_bufs)
return [handler(out_buf) for handler, out_buf in zip(handlers, out_bufs)]
def _execute_trivial(jaxpr, device, consts, handlers, *args):
env = {core.unitvar: core.unit}
_map(env.setdefault, jaxpr.invars, args)
_map(env.setdefault, jaxpr.constvars, consts)
outs = [canonicalize_dtype(v.val) if type(v) is Literal else env[v]
for v in jaxpr.outvars]
return [_copy_device_array_to_device(x, device) if type(x) is DeviceArray
else h(device_put(x, device)) for h, x in zip(handlers, outs)]
@memoize
def _get_device(device, backend):
# TODO(mattjj): after jaxlib update, avoid compile here, just to get device
c = xb.make_computation_builder("get_device")
built = c.Build(_make_unit(c))
options = xb.get_compile_options(
num_replicas=1,
num_partitions=1,
device_assignment=(device.id,) if device else None)
compiled = built.Compile(compile_options=options, backend=xb.get_backend(backend))
out, = compiled.local_devices()
return out
xla_call_p = core.Primitive('xla_call')
xla_call_p.call_primitive = True
xla_call_p.multiple_results = True
xla_call = partial(core.call_bind, xla_call_p)
xla_call_p.def_custom_bind(xla_call)
xla_call_p.def_impl(_xla_call_impl)
def _xla_call_translation_rule(c, axis_env,
in_nodes, name_stack, backend, name,
call_jaxpr, device=None):
del device # Ignored.
subc = xb.make_computation_builder("jit_{}".format(name))
args = [subc.ParameterWithShape(c.GetShape(n)) for n in in_nodes]
out_nodes = jaxpr_subcomp(subc, call_jaxpr, backend, axis_env, (),
extend_name_stack(name_stack, wrap_name(name, 'jit')), *args)
subc = subc.Build(subc.Tuple(*out_nodes))
return c.Call(subc, list(in_nodes))
ad.primitive_transposes[xla_call_p] = partial(ad.call_transpose, xla_call_p)
### translation tables
translations: Dict[core.Primitive, Callable] = {}
parallel_translations: Dict[core.Primitive, Callable] = {}
initial_style_translations: Dict[core.Primitive, Callable] = {}
call_translations: Dict[core.Primitive, Callable] = {}
backend_specific_translations: Dict[str, Dict[core.Primitive, Callable]] = defaultdict(dict)
translations[core.identity_p] = lambda c, x: x
call_translations[xla_call_p] = _xla_call_translation_rule
def zeros_like_translation_rule(c, x):
shape = c.GetShape(x)
assert not shape.is_tuple()
zero = c.Constant(onp.array(0, shape.element_type()))
return c.Broadcast(zero, shape.dimensions())
translations[ad_util.zeros_like_p] = zeros_like_translation_rule
def add_jaxvals_translation_rule(c, x, y):
shape = c.GetShape(x)
assert not shape.is_tuple()
return c.Add(x, y)
translations[ad_util.add_jaxvals_p] = add_jaxvals_translation_rule
@lu.transformation
def _tuple_output(*args, **kwargs):
ans = yield args, kwargs
yield (ans,)
def lower_fun(fun, multiple_results=True):
# This function can only be used to lower functions that take JAX array types
# as arguments (and e.g. don't accept unit values), because it assumes it can
# map from XLA types to JAX types. In general that mapping is not possible (as
# the mapping from JAX types to XLA types is not invertible), but for now at
# least we assume that the mapping from JAX *array* types to XLA array types
# is invertible. This assumption is unchecked!
# TODO(mattjj): remove assumption can map XLA array types to JAX array types
def f(c, *xla_args, **params):
# TODO(mattjj): revise this 'calling convention'
avals = [_array_aval_from_xla_shape(c.GetShape(x)) for x in xla_args]
pvals = [pe.PartialVal.unknown(a) for a in avals]
wrapped_fun = lu.wrap_init(fun, params)
if not multiple_results:
wrapped_fun = _tuple_output(wrapped_fun)
jaxpr, _, consts = pe.trace_to_jaxpr(wrapped_fun, pvals, instantiate=True,
stage_out=True)
consts = _map(c.Constant, consts)
outs = jaxpr_subcomp(c, jaxpr, None, AxisEnv(1), consts, '', *xla_args)
if multiple_results:
return c.Tuple(*outs)
else:
assert len(outs) == 1, outs
return outs[0]
return f
def _array_aval_from_xla_shape(xla_shape):
  # This function instantiates the assumption that we can map from XLA array
# types to JAX array types.
# TODO(mattjj): remove assumption can map XLA array types to JAX array types
assert not xla_shape.is_tuple()
return ShapedArray(xla_shape.dimensions(), xla_shape.numpy_dtype())
def lower_fun_initial_style(fun):
def f(c, axis_env, name_stack, avals, backend, *xla_args, **params):
pvals = [pe.PartialVal.unknown(a) for a in avals]
jaxpr, _, consts = pe.trace_to_jaxpr(
lu.wrap_init(fun, params), pvals, instantiate=True, stage_out=True)
consts = _map(c.Constant, consts)
outs = jaxpr_subcomp(c, jaxpr, backend, axis_env, consts, name_stack,
*xla_args)
return c.Tuple(*outs)
return f
### device-persistent data
class Token(object): pass
token = Token()
pytype_aval_mappings[Token] = lambda _: abstract_token
core.pytype_aval_mappings[Token] = lambda _: abstract_token
xla_shape_handlers[AbstractToken] = lambda _: xc.Shape.token_shape()
xla_result_handlers[AbstractToken] = lambda _, __: lambda _: token
canonicalize_dtype_handlers[Token] = identity
class DeviceValue(object):
"""A DeviceValue represents a value backed by device memory."""
__slots__ = ["aval", "device_buffer", "__weakref__"]
def __init__(self, aval, device_buffer):
self.aval = aval
self.device_buffer = device_buffer
def _check_if_deleted(self):
if self.device_buffer is deleted_buffer:
raise ValueError("DeviceValue has been deleted.")
def block_until_ready(self):
"""Blocks the caller until the buffer's value has been computed on device.
This method is mostly useful for timing microbenchmarks that wish to
time how long a computation takes, without transferring the result back
to the host.
Returns the buffer object (`self`).
"""
self._check_if_deleted()
self.device_buffer.block_host_until_ready()
return self
def _forward_method(attrname, self, fun, *args):
return fun(getattr(self, attrname), *args)
_forward_to_value = partial(_forward_method, "_value")
class DeviceArray(DeviceValue):
"""A DeviceArray is an ndarray backed by a single device memory buffer."""
# We don't subclass ndarray because that would open up a host of issues,
# but lax_numpy.py overrides isinstance behavior and attaches ndarray methods.
__slots__ = ["_npy_value", "_device", "_lazy_expr"]
__array_priority__ = 100
def __init__(self, aval, device, lazy_expr, device_buffer):
self.aval = aval
self.device_buffer = device_buffer
self._device = device
self._lazy_expr = lazy_expr
self._npy_value = None
if not core.skip_checks:
assert type(aval) is ShapedArray
npy_value = self._value
assert npy_value.dtype == aval.dtype and npy_value.shape == aval.shape
@property
def _value(self):
self._check_if_deleted()
if self._npy_value is None:
if is_device_constant(self):
self._npy_value = lazy.eval_lexpr(self._lazy_expr, None)
else:
self._npy_value = _force(self).device_buffer.to_py()
self._npy_value.flags.writeable = False
return self._npy_value
@property
def shape(self):
return self.aval.shape
@property
def dtype(self):
return self.aval.dtype
@property
def size(self):
return prod(self.aval.shape)
@property
def ndim(self):
return len(self.aval.shape)
def copy(self):
"""Returns an ndarray (backed by host memory, not device memory)."""
return onp.asarray(self)
def copy_to_host_async(self):
"""Requests a copy of the buffer to the host."""
self._check_if_deleted()
if self._npy_value is None and not is_device_constant(self):
self.device_buffer.copy_to_host_async()
def delete(self):
"""Deletes the device array and any cached copy on the host.
It is an error to access the contents of a `DeviceArray` after it has
been deleted.
Use of this method is optional; device buffers will be reclaimed
automatically by Python when a DeviceArray object is garbage collected.
However, it is sometimes useful to have more explicit control over the
time of deletion.
"""
self.device_buffer.delete()
self.device_buffer = deleted_buffer
self._npy_value = None
def __repr__(self):
line_width = onp.get_printoptions()['linewidth']
prefix = '{}('.format(self.__class__.__name__)
s = onp.array2string(self._value, prefix=prefix, suffix=',',
separator=', ', max_line_width=line_width)
dtype_str = 'dtype={})'.format(self.dtype.name)
last_line_len = len(s) - s.rfind('\n') + 1
sep = ' '
if last_line_len + len(dtype_str) + 1 > line_width:
sep = ' ' * len(prefix)
return "{}{},{}{}".format(prefix, s, sep, dtype_str)
def item(self):
if dtypes.issubdtype(self.dtype, onp.complexfloating):
return complex(self)
elif dtypes.issubdtype(self.dtype, onp.floating):
return float(self)
elif dtypes.issubdtype(self.dtype, onp.integer):
return int(self)
elif dtypes.issubdtype(self.dtype, onp.bool_):
return bool(self)
else:
raise TypeError(self.dtype)
def __len__(self):
try:
return self.aval.shape[0]
except IndexError as err:
raise TypeError("len() of unsized object") from err # same as numpy error
def __iter__(self):
if self.ndim == 0:
raise TypeError("iteration over a 0-d array") # same as numpy error
else:
return self._value.__iter__()
def __reversed__(self):
if self.ndim == 0:
raise TypeError("iteration over a 0-d array")
else:
return reversed(self._value)
def __format__(self, format_spec):
# Simulates behavior of https://github.com/numpy/numpy/pull/9883
if self.ndim == 0:
return format(self._value[()], format_spec)
else:
return format(self._value, format_spec)
def __array__(self, dtype=None, context=None):
return onp.asarray(self._value, dtype=dtype)
@property
def __cuda_array_interface__(self):
return _force(self).device_buffer.__cuda_array_interface__
__str__ = partialmethod(_forward_to_value, str)
__bool__ = __nonzero__ = partialmethod(_forward_to_value, bool)
def __float__(self): return self._value.__float__()
def __int__(self): return self._value.__int__()
def __complex__(self): return self._value.__complex__()
__hex__ = partialmethod(_forward_to_value, hex)
__oct__ = partialmethod(_forward_to_value, oct)
__index__ = partialmethod(_forward_to_value, op.index)
# pickle saves and loads just like an ndarray
__reduce__ = partialmethod(_forward_to_value, op.methodcaller("__reduce__"))
# clobbered when jax.numpy is imported, but useful in tests
def __eq__(self, other): return self._value == other
def __hash__(self):
raise TypeError("JAX DeviceArray, like numpy.ndarray, is not hashable.")
# The following methods are dynamically overridden in lax_numpy.py.
def __getitem__(self, i): raise NotImplementedError
class DeletedBuffer(object): pass
deleted_buffer = DeletedBuffer()
class DeviceConstant(object):
__slots__ = ["_device"]
def __init__(self, device=None): self._device = device
def device(self): return self._device
def to_py(self): return None
def is_device_constant(x):
return type(x) is DeviceArray and type(x.device_buffer) is DeviceConstant
core.literalable_types.add(DeviceArray)
core.pytype_aval_mappings[DeviceArray] = ConcreteArray
pytype_aval_mappings[DeviceArray] = op.attrgetter('aval')
canonicalize_dtype_handlers[DeviceArray] = identity
def _device_array_constant_handler(c, val, canonicalize_types=True):
if is_device_constant(val):
return lazy.stage_lexpr(c, val._lazy_expr, None)
else:
base_val = c.Constant(val.device_buffer.to_py())
return lazy.stage_lexpr(c, val._lazy_expr, base_val)
xb.register_constant_handler(DeviceArray, _device_array_constant_handler)
def _device_put_device_array(x, device):
x = _copy_device_array_to_device(x, device)
return _force(x).device_buffer
device_put_handlers[DeviceArray] = _device_put_device_array
def _copy_device_array_to_device(x, device):
if is_device_constant(x):
return DeviceArray(x.aval, device, x._lazy_expr, DeviceConstant(device))
elif xb.get_device_backend(device).platform == x.device_buffer.platform():
if device is None or x.device_buffer.device() == device:
return x
else:
moved_buf = x.device_buffer.copy_to_device(device)
else:
# Buffers from different XLA backends are passed through the host.
moved_buf = xc.Buffer.from_pyval(x.device_buffer.to_py(), device,
backend=xb.get_device_backend(device))
return DeviceArray(x.aval, device, x._lazy_expr, moved_buf)
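# Illustrative only (hedged sketch, not part of the original module): the public
# jax.device_put API ends up in the device_put handlers and the copy helper
# defined in this file, e.g.
#
#   import jax, jax.numpy as jnp
#   cpu0 = jax.devices('cpu')[0]
#   y = jax.device_put(jnp.arange(4), cpu0)  # copy (or move) the buffer to cpu0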
def _force(x: DeviceArray) -> DeviceArray:
if lazy.is_trivial(x._lazy_expr):
return x
else:
# force x on the device where it lives, but preserve stickiness on result
if x._device:
device = x._device
sticky = True
else:
device = x.device_buffer.device()
sticky = False
force_fun = _lazy_force_computation(sticky, x.aval, device, x._lazy_expr)
return force_fun(x)
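# Illustrative note (hedged, not part of the original module): a DeviceArray
# backed by a non-trivial lazy expression is only materialized on demand;
# reading its value (repr/str, onp.asarray, the _value property above) goes
# through _force() and the cached computation built below.
#
#   import numpy as onp
#   import jax.numpy as jnp
#   x = jnp.zeros((4, 4))  # may be represented lazily as a device constant
#   onp.asarray(x)         # forces evaluation via _value -> lazy eval / _force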
@cache()
def _lazy_force_computation(sticky, aval, device, lexpr) -> Callable[[DeviceArray], DeviceArray]:
c = xb.make_computation_builder("lazy_force")
if lazy.is_constant(lexpr):
param = None
else:
idxs = [(src, dst) for dst, src in enumerate(lexpr.dims) if src is not None]
param_shape = [None] * len(idxs)
for src, dst in idxs:
param_shape[src] = aval.shape[dst]
param = c.ParameterWithShape(xc.Shape.array_shape(aval.dtype, param_shape))
xla_out = lazy.stage_lexpr(c, lexpr, param)
built_c = c.Build(xla_out)
device = _device_from_arg_devices([device])
options = xb.get_compile_options(
num_replicas=1,
num_partitions=1,
device_assignment=device and (device.id,))
backend = xb.get_device_backend(device)
compiled = built_c.Compile(compile_options=options, backend=backend)
result_device = device if sticky else None
handler = partial(DeviceArray, aval, result_device, lazy.array(aval.shape))
force_fun: Callable[[DeviceValue], DeviceArray]
if lazy.is_constant(lexpr):
def force_fun(_):
return handler(compiled.Execute([])[0])
else:
def force_fun(x):
return handler(compiled.Execute([x.device_buffer])[0])
return force_fun
def _device_put_impl(x, device=None):
if type(x) is DeviceArray:
return _copy_device_array_to_device(x, device)
try:
a = abstractify(x)
except TypeError as err:
raise TypeError("Argument '{}' of type {} is not a valid JAX type"
.format(x, type(x))) from err
handler = aval_to_result_handler(device, a)
return handler(device_put(x, device))
device_put_p = core.Primitive('device_put')
device_put_p.def_impl(_device_put_impl)
pe.custom_partial_eval_rules[device_put_p] = lambda trace, x, **params: x
ad.deflinear(device_put_p, lambda cotangent, **kwargs: [cotangent])
masking.shape_rules[device_put_p] = lambda x, **_: x.shape
masking.defvectorized(device_put_p)
def _remat_translation_rule(c, axis_env, in_nodes,
name_stack, backend, name, call_jaxpr,
device=None, concrete=None):
"""Lower remat to a Conditional which always returns true. This:
1. Circumvents common subexpression elimination.
2. In common case of `jax.grad(jax.remat(f))`, ensures the remat blocks
occur after the primal blocks, because cotangent is an input to the
Conditional."""
del device, concrete # Unused.
# Fake condition which always selects True branch.
rng = c.RngUniform(c.Constant(onp.array(0, dtype=onp.float32)),
c.Constant(onp.array(1, dtype=onp.float32)),
[])
pred = c.Lt(rng, c.Constant(onp.array(2, dtype=onp.float32)))
true_op = c.Tuple(*in_nodes)
remat_subc = xb.make_computation_builder("remat_call_subcomputation")
input_op = remat_subc.ParameterWithShape(c.GetShape(true_op), replicated=[])
args = [remat_subc.GetTupleElement(input_op, i) for i in range(len(in_nodes))]
out_nodes = jaxpr_subcomp(remat_subc, call_jaxpr, backend, axis_env, (),
extend_name_stack(name_stack, wrap_name(name, 'remat')),
*args)
out_node_shapes = [remat_subc.GetShape(o) for o in out_nodes]
remat_subc = remat_subc.Build(remat_subc.Tuple(*out_nodes))
false_op = true_op
dummy_subc = xb.make_computation_builder("remat_call_dummy_subcomputation")
dummy_subc.ParameterWithShape(c.GetShape(false_op), replicated=[])
def zeros(xla_shape):
shape, dtype = xla_shape.dimensions(), xla_shape.numpy_dtype()
zero = dummy_subc.Constant(onp.array(0, dtype=dtype))
return dummy_subc.Broadcast(zero, shape)
out_nodes = [zeros(s) for s in out_node_shapes]
dummy_subc = dummy_subc.Build(dummy_subc.Tuple(*out_nodes))
return c.Conditional(pred, true_op, remat_subc, false_op, dummy_subc)
call_translations[pe.remat_call_p] = _remat_translation_rule
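# Illustrative only (hedged, not part of the original module): the rule above is
# what the public rematerialization API lowers to, e.g.
#
#   import jax, jax.numpy as jnp
#   def f(x):
#       return jnp.sin(jnp.sin(x))
#   g = jax.grad(jax.remat(f))  # intermediates of f are recomputed on the
#   g(1.0)                      # backward pass instead of being stored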
| 38.367308 | 97 | 0.717383 |
4a19254b4ca6a8323ca76214e882e656e33a3498 | 98,268 | py | Python | electroncash/network.py | majcosta/ElectrumBCHA | 23f1ec1755c87bc2e44142edc7d6276a07ca7516 | ["MIT"] | 23 | 2020-11-23T21:49:20.000Z | 2022-02-23T05:43:44.000Z | electroncash/network.py | meaze0507/ElectrumABC | 262dd0acc00763422eededf5ab20c7cb82674cdb | ["MIT"] | 73 | 2020-11-24T19:04:12.000Z | 2022-03-25T15:09:37.000Z | electroncash/network.py | meaze0507/ElectrumABC | 262dd0acc00763422eededf5ab20c7cb82674cdb | ["MIT"] | 6 | 2020-11-24T05:53:14.000Z | 2022-01-24T16:09:36.000Z |
# Electrum ABC - lightweight eCash client
# Copyright (C) 2020 The Electrum ABC developers
# Copyright (c) 2011-2016 Thomas Voegtlin
# Copyright (C) 2017-2020 The Electron Cash Developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import queue
import os
import stat
import errno
import random
import re
import select
from collections import defaultdict
import threading
import socket
import json
from typing import Dict
import socks
from . import util
from . import bitcoin
from . import networks
from .i18n import _
from .interface import Connection, Interface
from . import blockchain
from . import version
from .tor import TorController, check_proxy_bypass_tor_control
from .utils import Event
DEFAULT_AUTO_CONNECT = True
# Versions prior to 4.0.15 had this set to True, but EC opted for False to
# promote network health by allowing clients to connect to new servers easily.
# For now it is better to set it to True again to avoid having new users
# start on the wrong chain.
DEFAULT_WHITELIST_SERVERS_ONLY = True
def parse_servers(result):
""" parse servers list into dict format"""
servers = {}
for item in result:
try:
host = item[1]
out = {}
version = None
pruning_level = '-'
if len(item) > 2:
for v in item[2]:
if re.match(r"[st]\d*", v):
protocol, port = v[0], v[1:]
if port == '': port = networks.net.DEFAULT_PORTS[protocol]
out[protocol] = port
elif re.match(r"v(.?)+", v):
version = v[1:]
elif re.match(r"p\d*", v):
pruning_level = v[1:]
if pruning_level == '': pruning_level = '0'
if out:
out['pruning'] = pruning_level
out['version'] = version
servers[host] = out
except (TypeError, ValueError, IndexError, KeyError) as e:
util.print_error("parse_servers:", item, repr(e))
return servers
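# Illustrative only (hedged; the hostname and feature strings below are made-up
# examples, and real servers vary): given one peer entry of the shape
# [ip, hostname, [features]], parse_servers() produces roughly
#
#   parse_servers([["192.0.2.1", "example-server.org",
#                   ["v1.4.2", "s50002", "t50001", "p10000"]]])
#   # -> {'example-server.org': {'s': '50002', 't': '50001',
#   #                            'pruning': '10000', 'version': '1.4.2'}}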
def filter_version(servers):
def is_recent(vv):
try:
return version.normalize_version(vv) >= version.normalize_version(version.PROTOCOL_VERSION)
except Exception as e:
util.print_error("filter_version:", repr(e))
return False
return {k: v for k, v in servers.items() if is_recent(v.get('version'))}
def filter_protocol(hostmap, protocol = 's'):
'''Filters the hostmap for those implementing protocol.
Protocol may be: 's', 't', or 'st' for both.
The result is a list in serialized form.'''
eligible = []
for host, portmap in hostmap.items():
for proto in protocol:
port = portmap.get(proto)
if port:
eligible.append(serialize_server(host, port, proto))
return eligible
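# Illustrative only (hedged, made-up hostmap): filter_protocol picks the port for
# the requested protocol letter(s) and serializes each match, e.g.
#
#   filter_protocol({'example-server.org': {'s': '50002', 't': '50001'}}, 's')
#   # -> ['example-server.org:50002:s']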
def get_eligible_servers(hostmap=None, protocol="s", exclude_set=set()):
if hostmap is None:
hostmap = networks.net.DEFAULT_SERVERS
return list(set(filter_protocol(hostmap, protocol)) - exclude_set)
def pick_random_server(hostmap = None, protocol = 's', exclude_set = set()):
eligible = get_eligible_servers(hostmap, protocol, exclude_set)
return random.choice(eligible) if eligible else None
def servers_to_hostmap(servers):
''' Takes an iterable of HOST:PORT:PROTOCOL strings and breaks them into
a hostmap dict of host -> { protocol : port } suitable to be passed to
pick_random_server() and get_eligible_servers() above.'''
ret = dict()
for s in servers:
try:
host, port, protocol = deserialize_server(s)
except (AssertionError, ValueError, TypeError) as e:
util.print_error("[servers_to_hostmap] deserialization failure for server:", s, "error:", str(e))
continue # deserialization error
m = ret.get(host, dict())
need_add = len(m) == 0
m[protocol] = port
if need_add:
m['pruning'] = '-' # hmm. this info is missing, so give defaults just to make the map complete.
m['version'] = version.PROTOCOL_VERSION
ret[host] = m
return ret
def hostmap_to_servers(hostmap):
''' The inverse of servers_to_hostmap '''
return filter_protocol(hostmap, protocol = 'st')
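# Illustrative round trip (hedged, made-up server string):
#
#   hm = servers_to_hostmap(['example-server.org:50002:s'])
#   hm['example-server.org']['s']   # -> '50002' (plus default 'pruning'/'version')
#   hostmap_to_servers(hm)          # -> ['example-server.org:50002:s']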
from .simple_config import SimpleConfig
proxy_modes = ['socks4', 'socks5', 'http']
def serialize_proxy(p):
if not isinstance(p, dict):
return None
return ':'.join([p.get('mode'), p.get('host'), p.get('port'),
p.get('user', ''), p.get('password', '')])
def deserialize_proxy(s):
if not isinstance(s, str):
return None
if s.lower() == 'none':
return None
proxy = { "mode":"socks5", "host":"localhost" }
args = s.split(':')
n = 0
if proxy_modes.count(args[n]) == 1:
proxy["mode"] = args[n]
n += 1
if len(args) > n:
proxy["host"] = args[n]
n += 1
if len(args) > n:
proxy["port"] = args[n]
n += 1
else:
proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
if len(args) > n:
proxy["user"] = args[n]
n += 1
if len(args) > n:
proxy["password"] = args[n]
try:
# Fix for #1182 -- bad proxy can end up in config file
int(proxy['port'])
except (ValueError, TypeError):
return None
return proxy
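# Illustrative only (hedged): a typical Tor-style SOCKS5 proxy string parses as
#
#   deserialize_proxy('socks5:localhost:9050')
#   # -> {'mode': 'socks5', 'host': 'localhost', 'port': '9050'}
#
# and an omitted port falls back to '1080' (or '8080' for 'http').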
def deserialize_server(server_str):
host, port, protocol = str(server_str).rsplit(':', 2)
assert protocol in 'st'
int(port) # Throw if cannot be converted to int
return host, port, protocol
def serialize_server(host, port, protocol):
return str(':'.join([host, port, protocol]))
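# Illustrative only (hedged, made-up host): these two helpers are inverses,
#
#   serialize_server('example-server.org', '50002', 's')
#   # -> 'example-server.org:50002:s'
#   deserialize_server('example-server.org:50002:s')
#   # -> ('example-server.org', '50002', 's')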
bypass_proxy_filters = [check_proxy_bypass_tor_control]
def _socksocket_filtered(*args, **kwargs):
"""
This function checks bypass_proxy_filters; if any of the filters returns True,
a raw socket is returned, otherwise a SOCKS socket is returned.
"""
if any(f(*args, **kwargs) for f in bypass_proxy_filters):
if socket._socketobject:
return socket._socketobject(*args, **kwargs)
else:
return socket.socket(*args, **kwargs)
else:
return socks.socksocket(*args, **kwargs)
class Network(util.DaemonThread):
"""The Network class manages a set of connections to remote electrum
servers, each connected socket is handled by an Interface() object.
Connections are initiated by a Connection() thread which stops once
the connection succeeds or fails.
Our external API:
- Member functions get_header(), get_interfaces(), get_local_height(),
get_parameters(), get_server_height(), get_status_value(),
is_connected(), set_parameters(), stop()
"""
INSTANCE = None # Only 1 Network instance is ever alive during app lifetime (it's a singleton)
# These defaults are decent for the desktop app. Other platforms may
# override these at any time (iOS sets these to lower values).
NODES_RETRY_INTERVAL = 60 # How often to retry a node we know about in secs, if we are connected to less than 10 nodes
SERVER_RETRY_INTERVAL = 10 # How often to reconnect when server down in secs
MAX_MESSAGE_BYTES = 1024*1024*32 # = 32MB. The message size limit in bytes. This is to prevent a DoS vector whereby the server can fill memory with garbage data.
tor_controller: TorController = None
def __init__(self, config=None):
if config is None:
config = {} # Do not use mutables as default values!
util.DaemonThread.__init__(self)
self.config = SimpleConfig(config) if isinstance(config, dict) else config
self.num_server = 10 if not self.config.get('oneserver') else 0
self.blockchains = blockchain.read_blockchains(self.config)
self.print_error("blockchains", self.blockchains.keys())
self.blockchain_index = config.get('blockchain_index', 0)
if self.blockchain_index not in self.blockchains.keys():
self.blockchain_index = 0
# Server for addresses and transactions
self.blacklisted_servers = set(self.config.get('server_blacklist', []))
self.whitelisted_servers, self.whitelisted_servers_hostmap = self._compute_whitelist()
self.print_error("server blacklist: {} server whitelist: {}".format(self.blacklisted_servers, self.whitelisted_servers))
self.default_server = self.get_config_server()
self.bad_certificate_servers: Dict[str, str] = dict()
self.server_list_updated = Event()
self.tor_controller = TorController(self.config)
self.tor_controller.active_port_changed.append(self.on_tor_port_changed)
self.tor_controller.start()
self.lock = threading.Lock()
# locks: if you need to take multiple ones, acquire them in the order they are defined here!
self.interface_lock = threading.RLock() # <- re-entrant
self.pending_sends_lock = threading.Lock()
self.pending_sends = []
self.message_id = util.Monotonic(locking=True)
self.verified_checkpoint = False
self.verifications_required = 1
# If the height is cleared from the network constants, we're
# looking to get 3 confirmations of the first verification.
if networks.net.VERIFICATION_BLOCK_HEIGHT is None:
self.verifications_required = 3
self.checkpoint_servers_verified = {}
self.checkpoint_height = networks.net.VERIFICATION_BLOCK_HEIGHT
self.debug = False
self.irc_servers = {} # returned by interface (list from irc)
self.recent_servers = self.read_recent_servers()
self.banner = ''
self.donation_address = ''
self.relay_fee = None
# callbacks passed with subscriptions
self.subscriptions = defaultdict(list)
self.sub_cache = {} # note: needs self.interface_lock
# callbacks set by the GUI
self.callbacks = defaultdict(list)
dir_path = os.path.join( self.config.path, 'certs')
if not os.path.exists(dir_path):
os.mkdir(dir_path)
os.chmod(dir_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
# subscriptions and requests
self.subscribed_addresses = set()
# Requests from client we've not seen a response to
self.unanswered_requests = {}
# retry times
self.server_retry_time = time.time()
self.nodes_retry_time = time.time()
# kick off the network. interface is the main server we are currently
# communicating with. interfaces is the set of servers we are connecting
# to or have an ongoing connection with
self.interface = None # note: needs self.interface_lock
self.interfaces = {} # note: needs self.interface_lock
self.auto_connect = self.config.get('auto_connect', DEFAULT_AUTO_CONNECT)
self.connecting = set()
self.requested_chunks = set()
self.socket_queue = queue.Queue()
if Network.INSTANCE:
# This happens on iOS which kills and restarts the daemon on app sleep/wake
self.print_error("A new instance has started and is replacing the old one.")
Network.INSTANCE = self # This implicitly should force stale instances to eventually del
self.start_network(deserialize_server(self.default_server)[2], deserialize_proxy(self.config.get('proxy')))
def on_tor_port_changed(self, controller: TorController):
if not controller.active_socks_port or not controller.is_enabled() or not self.config.get('tor_use', False):
return
proxy = deserialize_proxy(self.config.get('proxy'))
port = str(controller.active_socks_port)
if proxy["port"] == port:
return
proxy["port"] = port
self.config.set_key('proxy', serialize_proxy(proxy))
# This handler can run before `proxy` is present and `load_parameters` needs it
if hasattr(self, "proxy"):
self.load_parameters()
def __del__(self):
""" NB: due to Network.INSTANCE keeping the singleton instance alive,
this code isn't normally reached, except in the iOS
implementation, which kills the daemon and the network before app
sleep, and creates a new daemon and network on app awake. """
if Network.INSTANCE is self: # This check is important for iOS
Network.INSTANCE = None # <--- Not normally reached, but here for completeness.
else:
self.print_error("Stale instance deallocated")
if hasattr(super(), '__del__'):
super().__del__()
@staticmethod
def get_instance():
""" Returns the extant Network singleton, if any, or None if in offline mode """
return Network.INSTANCE
def callback_listener_count(self, event):
return len(self.callbacks.get(event, [])) # we intentionally don't take any locks here as a performance optimization
def register_callback(self, callback, events):
with self.lock:
for event in events:
self.callbacks[event].append(callback)
if event in self._deprecated_alternatives:
self._warn_deprecated_callback(event)
def unregister_callback(self, callback):
with self.lock:
for callbacks in self.callbacks.values():
try:
callbacks.remove(callback)
except ValueError:
pass
def trigger_callback(self, event, *args):
with self.lock:
callbacks = self.callbacks[event][:]
[callback(event, *args) for callback in callbacks]
self._legacy_callback_detector_and_mogrifier(event, *args)
def _legacy_callback_detector_and_mogrifier(self, event, *args):
if (event in ('blockchain_updated', 'wallet_updated')
and 'updated' in self.callbacks):
# Translate the blockchain_updated and wallet_updated events
# into the legacy 'updated' event for old external plugins that
# still rely on this event existing. There are some external
# electron cash plugins that still use this event, and we need
# to keep this hack here so they don't break on new EC
# versions. "Technical debt" :)
self.trigger_callback('updated') # we will re-enter this function with event == 'updated' (triggering the warning in the elif clause below)
elif event == 'verified2' and 'verified' in self.callbacks:
# pop off the 'wallet' arg as the old bad 'verified' callback lacked it.
self.trigger_callback('verified', args[1:]) # we will re-enter this function with event == 'verified' (triggering the warning in the elif clause below)
elif event in self._deprecated_alternatives:
# If we see updated or verified events come through here, warn:
# deprecated. Note that the above 2 clauses will also trigger this
# execution path.
self._warn_deprecated_callback(event)
_deprecated_alternatives = {
'updated' : "'blockchain_updated' and/or 'wallet_updated'",
'verified': "'verified2'",
}
def _warn_deprecated_callback(self, which):
alt = self._deprecated_alternatives.get(which)
if alt:
self.print_error("Warning: Legacy '{}' callback is deprecated, it is recommended that you instead use: {}. Please update your code.".format(which, alt))
else:
self.print_error("Warning: Legacy '{}' callback is deprecated. Please update your code.".format(which))
def recent_servers_file(self):
return os.path.join(self.config.path, "recent-servers")
def read_recent_servers(self):
if not self.config.path:
return []
try:
with open(self.recent_servers_file(), "r", encoding='utf-8') as f:
data = f.read()
return json.loads(data)
except:
return []
def save_recent_servers(self):
if not self.config.path:
return
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
try:
with open(self.recent_servers_file(), "w", encoding='utf-8') as f:
f.write(s)
except:
pass
def get_server_height(self):
with self.interface_lock:
return self.interface.tip if self.interface else 0
def server_is_lagging(self):
sh = self.get_server_height()
if not sh:
self.print_error('no height for main interface')
return True
lh = self.get_local_height()
result = (lh - sh) > 1
if result:
self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
return result
def set_status(self, status):
self.connection_status = status
self.notify('status')
def is_connected(self):
return self.interface is not None
def is_connecting(self):
return self.connection_status == 'connecting'
def is_up_to_date(self):
return self.unanswered_requests == {}
def queue_request(self, method, params, interface=None, *, callback=None, max_qlen=None):
""" If you want to queue a request on any interface it must go through
this function so message ids are properly tracked.
Returns the monotonically increasing message id for this request.
May return None if queue is too full (max_qlen). (max_qlen is only
considered if callback is not None.)
Note that the special argument interface='random' will queue the request
on a random, currently active (connected) interface. Otherwise
`interface` should be None or a valid Interface instance.
If no interface is available:
- If `callback` is supplied: the request will be enqueued and sent
later when an interface becomes available
- If callback is not supplied: an AssertionError exception is raised
"""
if interface is None:
interface = self.interface
elif interface == 'random':
interface = random.choice(self.get_interfaces(interfaces=True)
or (None,)) # may set interface to None if no interfaces
message_id = self.message_id() # Note: self.message_id is a Monotonic (thread-safe) counter-object, see util.Monotonic
if callback:
if max_qlen and len(self.unanswered_requests) >= max_qlen:
# Indicate to client code we are busy
return None
self.unanswered_requests[message_id] = [method, params, callback]
if not interface:
# Request was queued -- it should get sent if/when we get
# an interface in the future
return message_id
# Now, if no interface, we will raise AssertionError
assert isinstance(interface, Interface), "queue_request: No interface! (request={} params={})".format(method, params)
if self.debug:
self.print_error(interface.host, "-->", method, params, message_id)
interface.queue_request(method, params, message_id)
if self is not Network.INSTANCE:
self.print_error("*** WARNING: queueing request on a stale instance!")
return message_id
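# Illustrative call (hedged; 'my_callback' is a made-up client callback): queue a
# ping on a random connected interface with a bounded number of in-flight
# requests,
#
#   msg_id = self.queue_request('server.ping', [], interface='random',
#                               callback=my_callback, max_qlen=100)
#   if msg_id is None:
#       pass  # too many unanswered requests; caller should retry later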
def send_subscriptions(self):
self.sub_cache.clear()
# Resend unanswered requests
old_reqs = self.unanswered_requests
self.unanswered_requests = {}
for m_id, request in old_reqs.items():
message_id = self.queue_request(request[0], request[1], callback = request[2])
assert message_id is not None
self.queue_request('server.banner', [])
self.queue_request('server.donation_address', [])
self.queue_request('server.peers.subscribe', [])
self.queue_request('blockchain.relayfee', [])
n_defunct = 0
method = 'blockchain.scripthash.subscribe'
for h in self.subscribed_addresses.copy():
params = [h]
k = self.get_index(method, params)
if self.subscriptions.get(k, None):
self.queue_request(method, params)
else:
# If a wallet was closed, we stayed subscribed to its scripthashes
# (there is no way to unsubscribe from a scripthash, unfortunately)
# However, now that we are connecting to a new server, use this
# opportunity to clean house and not subscribe to scripthashes
# for closed wallets. We know a scripthash is defunct if it is
# missing a callback (no entry in self.subscriptions dict).
#self.print_error("removing defunct subscription", h)
self.subscribed_addresses.discard(h)
self.subscriptions.pop(k, None) # it may be an empty list (or missing), so pop it just in case it's a list.
n_defunct += 1
self.print_error('sent subscriptions to', self.interface.server, len(old_reqs),"reqs", len(self.subscribed_addresses), "subs", n_defunct, "defunct subs")
def get_status_value(self, key):
if key == 'status':
value = self.connection_status
elif key == 'banner':
value = self.banner
elif key == 'blockchain_updated':
value = (self.get_local_height(), self.get_server_height())
elif key == 'updated':
value = (self.get_local_height(), self.get_server_height())
self._warn_deprecated_callback(key)
elif key == 'servers':
value = self.get_servers()
elif key == 'interfaces':
value = self.get_interfaces()
elif key == 'proxy':
value = (self.proxy and self.proxy.copy()) or None
else:
raise RuntimeError('unexpected trigger key {}'.format(key))
return value
def notify(self, key):
if key in ('updated',):
# Legacy support. Will warn that updated is deprecated.
self.trigger_callback(key)
else:
self.trigger_callback(key, self.get_status_value(key))
def get_parameters(self):
host, port, protocol = deserialize_server(self.default_server)
return host, port, protocol, self.proxy, self.auto_connect
def get_donation_address(self):
if self.is_connected():
return self.donation_address
def get_interfaces(self, *, interfaces=False):
"""Returns the servers that are in connected state. Despite its name,
this method does not return the actual interfaces unless interfaces=True,
but rather returns the server:50002:s style string. """
with self.interface_lock:
return list(self.interfaces.values() if interfaces
else self.interfaces.keys())
def get_servers(self):
out = networks.net.DEFAULT_SERVERS.copy()
if self.irc_servers:
out.update(filter_version(self.irc_servers.copy()))
else:
for s in self.recent_servers:
try:
host, port, protocol = deserialize_server(s)
except:
continue
if host not in out:
out[host] = { protocol:port }
return out
def start_interface(self, server_key):
"""Start the given server if it is not already active or being connected to.
Arguments:
server_key --- server specifier in the form of '<host>:<port>:<protocol>'
"""
if (not server_key in self.interfaces and not server_key in self.connecting):
if server_key == self.default_server:
self.print_error("connecting to %s as new interface" % server_key)
self.set_status('connecting')
self.connecting.add(server_key)
c = Connection(server_key, self.socket_queue, self.config.path,
lambda x: x.bad_certificate.append_weak(self.on_bad_certificate))
def get_unavailable_servers(self):
exclude_set = set(self.interfaces)
exclude_set = exclude_set.union(self.connecting)
exclude_set = exclude_set.union(self.disconnected_servers)
exclude_set = exclude_set.union(self.blacklisted_servers)
return exclude_set
def start_random_interface(self):
exclude_set = self.get_unavailable_servers()
hostmap = self.get_servers() if not self.is_whitelist_only() else self.whitelisted_servers_hostmap
server_key = pick_random_server(hostmap, self.protocol, exclude_set)
if server_key:
self.start_interface(server_key)
def start_interfaces(self):
self.start_interface(self.default_server)
for i in range(self.num_server - 1):
self.start_random_interface()
def set_proxy(self, proxy):
self.proxy = proxy
# Store these somewhere so we can un-monkey-patch
if not hasattr(socket, "_socketobject"):
socket._socketobject = socket.socket
socket._getaddrinfo = socket.getaddrinfo
if proxy:
self.print_error('setting proxy', proxy)
proxy_mode = proxy_modes.index(proxy["mode"]) + 1
socks.setdefaultproxy(proxy_mode,
proxy["host"],
int(proxy["port"]),
# socks.py seems to want either None or a non-empty string
username=(proxy.get("user", "") or None),
password=(proxy.get("password", "") or None))
socket.socket = _socksocket_filtered
# prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
else:
socket.socket = socket._socketobject
socket.getaddrinfo = socket._getaddrinfo
self.notify('proxy')
def start_network(self, protocol, proxy):
assert not self.interface and not self.interfaces
assert not self.connecting and self.socket_queue.empty()
self.print_error('starting network')
self.disconnected_servers = set([])
self.protocol = protocol
self.set_proxy(proxy)
self.start_interfaces()
def stop_network(self):
with self.interface_lock:
self.print_error("stopping network")
for interface in list(self.interfaces.values()):
self.close_interface(interface)
if self.interface:
self.close_interface(self.interface)
assert self.interface is None
assert not self.interfaces
self.connecting = set()
# Get a new queue - no old pending connections thanks!
self.socket_queue = queue.Queue()
def set_parameters(self, host, port, protocol, proxy, auto_connect):
with self.interface_lock:
try:
self.save_parameters(host, port, protocol, proxy, auto_connect)
except ValueError:
return
self.load_parameters()
def save_parameters(self, host, port, protocol, proxy, auto_connect):
proxy_str = serialize_proxy(proxy)
server = serialize_server(host, port, protocol)
# sanitize parameters
try:
deserialize_server(serialize_server(host, port, protocol))
if proxy:
proxy_modes.index(proxy["mode"]) + 1
int(proxy['port'])
except:
raise ValueError("invalid server or proxy")
self.config.set_key('auto_connect', auto_connect, False)
self.config.set_key("proxy", proxy_str, False)
self.config.set_key("server", server, True)
if self.config.get('server') != server or self.config.get('proxy') != proxy_str:
raise ValueError("changes were not allowed by config")
def load_parameters(self):
server = self.get_config_server()
protocol = deserialize_server(server)[2]
proxy = deserialize_proxy(self.config.get('proxy'))
self.auto_connect = self.config.get('auto_connect', DEFAULT_AUTO_CONNECT)
if self.proxy != proxy or self.protocol != protocol:
# Restart the network defaulting to the given server
self.stop_network()
self.default_server = server
self.start_network(protocol, proxy)
elif self.default_server != server:
self.switch_to_interface(server, self.SWITCH_SET_PARAMETERS)
else:
self.switch_lagging_interface()
self.notify('blockchain_updated')
def get_config_server(self):
server = self.config.get('server', None)
if server:
try:
deserialize_server(server)
except:
self.print_error('Warning: failed to parse server-string; falling back to random.')
server = None
wl_only = self.is_whitelist_only()
if (not server) or (server in self.blacklisted_servers) or (wl_only and server not in self.whitelisted_servers):
hostmap = None if not wl_only else self.whitelisted_servers_hostmap
server = pick_random_server(hostmap, exclude_set=self.blacklisted_servers)
return server
def switch_to_random_interface(self):
"""Switch to a random connected server other than the current one"""
servers = self.get_interfaces() # Those in connected state
if self.default_server in servers:
servers.remove(self.default_server)
if servers:
self.switch_to_interface(random.choice(servers))
def switch_lagging_interface(self):
"""If auto_connect and lagging, switch interface"""
if self.server_is_lagging() and self.auto_connect:
# switch to one that has the correct header (not height)
header = self.blockchain().read_header(self.get_local_height())
filtered = list(map(lambda x:x[0], filter(lambda x: x[1].tip_header==header, self.interfaces.items())))
if filtered:
choice = random.choice(filtered)
self.switch_to_interface(choice, self.SWITCH_LAGGING)
SWITCH_DEFAULT = 'SWITCH_DEFAULT'
SWITCH_RANDOM = 'SWITCH_RANDOM'
SWITCH_LAGGING = 'SWITCH_LAGGING'
SWITCH_SOCKET_LOOP = 'SWITCH_SOCKET_LOOP'
SWITCH_FOLLOW_CHAIN = 'SWITCH_FOLLOW_CHAIN'
SWITCH_SET_PARAMETERS = 'SWITCH_SET_PARAMETERS'
def switch_to_interface(self, server, switch_reason=None):
"""Switch to server as our interface. If no connection exists nor
being opened, start a thread to connect. The actual switch will
happen on receipt of the connection notification. Do nothing
if server already is our interface."""
self.default_server = server
if server not in self.interfaces:
self.interface = None
self.start_interface(server)
return
i = self.interfaces[server]
if self.interface != i:
self.print_error("switching to '{}' reason '{}'".format(server, switch_reason))
# stop any current interface in order to terminate subscriptions
# fixme: we don't want to close headers sub
#self.close_interface(self.interface)
self.interface = i
self.send_subscriptions()
self.set_status('connected')
self.notify('blockchain_updated')
def close_interface(self, interface):
if interface:
with self.interface_lock:
if interface.server in self.interfaces:
self.interfaces.pop(interface.server)
if interface.server == self.default_server:
self.interface = None
interface.close()
def add_recent_server(self, server):
# list is ordered
if server in self.recent_servers:
self.recent_servers.remove(server)
self.recent_servers.insert(0, server)
self.recent_servers = self.recent_servers[0:20]
self.save_recent_servers()
def process_response(self, interface, request, response, callbacks):
if self.debug:
self.print_error("<--", response)
error = response.get('error')
result = response.get('result')
method = response.get('method')
params = response.get('params')
# FIXME:
# Do more to enforce result correctness, has the right data type, etc.
# This code as it stands has been superficially audited for that but I
# suspect it's still possible for a malicious server to cause clients
# to throw up a crash reporter by sending unexpected JSON data types
# or garbage data in the server response.
# We handle some responses; return the rest to the client.
if method == 'server.version':
if isinstance(result, list):
self.on_server_version(interface, result)
elif method == 'blockchain.headers.subscribe':
if error is None:
# on_notify_header below validates result is right type or format
self.on_notify_header(interface, result)
elif method == 'server.peers.subscribe':
if error is None and isinstance(result, list):
self.irc_servers = parse_servers(result)
self.notify('servers')
elif method == 'server.banner':
if error is None and isinstance(result, str):
# limit banner results to 16kb to avoid minor DoS vector whereby
# server sends a huge block of slow-to-render emojis which
# brings some platforms to their knees for a few minutes.
self.banner = result[:16384]
self.notify('banner')
elif method == 'server.donation_address':
if error is None and isinstance(result, str):
self.donation_address = result
elif method == 'blockchain.relayfee':
try:
if error is None and isinstance(result, (int, float)):
self.relay_fee = int(result * bitcoin.CASH)
self.print_error("relayfee", self.relay_fee)
except (TypeError, ValueError) as e:
self.print_error("bad server data in blockchain.relayfee:", result, "error:", repr(e))
elif method == 'blockchain.block.headers':
try:
self.on_block_headers(interface, request, response)
except Exception as e:
self.print_error(f"bad server response for {method}: {repr(e)} / {response}")
self.connection_down(interface.server)
elif method == 'blockchain.block.header':
try:
self.on_header(interface, request, response)
except Exception as e:
self.print_error(f"bad server response for {method}: {repr(e)} / {response}")
self.connection_down(interface.server)
for callback in callbacks:
callback(response)
def get_index(self, method, params):
""" hashable index for subscriptions and cache"""
return str(method) + (':' + str(params[0]) if params else '')
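# Illustrative only (hedged, made-up scripthash): the index is the method name,
# optionally suffixed with the first parameter,
#
#   get_index('server.banner', [])                              # -> 'server.banner'
#   get_index('blockchain.scripthash.subscribe', ['deadbeef'])  # -> 'blockchain.scripthash.subscribe:deadbeef'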
def process_responses(self, interface):
responses = interface.get_responses()
for request, response in responses:
if request:
method, params, message_id = request
k = self.get_index(method, params)
# client requests go through self.send() with a
# callback, are only sent to the current interface,
# and are placed in the unanswered_requests dictionary
client_req = self.unanswered_requests.pop(message_id, None)
if client_req:
if interface != self.interface:
self.print_error("advisory: response from non-primary {}".format(interface))
callbacks = [client_req[2]]
else:
# fixme: will only work for subscriptions
k = self.get_index(method, params)
callbacks = self.subscriptions.get(k, [])
# Copy the request method and params to the response
response['method'] = method
response['params'] = params
# Only once we've received a response to an addr subscription
# add it to the list; avoids double-sends on reconnection
if method == 'blockchain.scripthash.subscribe':
self.subscribed_addresses.add(params[0])
else:
if not response: # Closed remotely / misbehaving
self.connection_down(interface.server)
break
# Rewrite response shape to match subscription request response
method = response.get('method')
params = response.get('params')
k = self.get_index(method, params)
if method == 'blockchain.headers.subscribe':
response['result'] = params[0]
response['params'] = []
elif method == 'blockchain.scripthash.subscribe':
response['params'] = [params[0]] # addr
response['result'] = params[1]
callbacks = self.subscriptions.get(k, [])
# update cache if it's a subscription
if method.endswith('.subscribe'):
with self.interface_lock:
self.sub_cache[k] = response
# Response is now in canonical form
self.process_response(interface, request, response, callbacks)
def subscribe_to_scripthashes(self, scripthashes, callback):
msgs = [('blockchain.scripthash.subscribe', [sh])
for sh in scripthashes]
self.send(msgs, callback)
def request_scripthash_history(self, sh, callback):
self.send([('blockchain.scripthash.get_history', [sh])], callback)
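# Illustrative only (hedged; the scripthash and callback are placeholders): both
# helpers above reduce to a self.send() call with (method, params) tuples,
#
#   self.send([('blockchain.scripthash.get_history', ['<scripthash hex>'])],
#             my_callback)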
def send(self, messages, callback):
"""Messages is a list of (method, params) tuples"""
messages = list(messages)
if messages: # Guard against an empty message list, which is a no-op and just wastes CPU to enqueue/dequeue (not even the callback is called). I've seen the code send empty message lists before in synchronizer.py
with self.pending_sends_lock:
self.pending_sends.append((messages, callback))
def process_pending_sends(self):
# Requests need connectivity. If we don't have an interface,
# we cannot process them.
if not self.interface:
return
with self.pending_sends_lock:
sends = self.pending_sends
self.pending_sends = []
for messages, callback in sends:
for method, params in messages:
r = None
if method.endswith('.subscribe'):
k = self.get_index(method, params)
# add callback to list
l = self.subscriptions[k] # <-- it's a defaultdict(list)
if callback not in l:
l.append(callback)
# check cached response for subscriptions
r = self.sub_cache.get(k)
if r is not None:
util.print_error("cache hit", k)
callback(r)
else:
self.queue_request(method, params, callback = callback)
def _cancel_pending_sends(self, callback):
ct = 0
with self.pending_sends_lock:
for item in self.pending_sends.copy():
messages, _callback = item
if callback == _callback:
self.pending_sends.remove(item)
ct += 1
return ct
def unsubscribe(self, callback):
'''Unsubscribe a callback to free object references to enable GC.
It is advised that this function only be called from the network thread
to avoid race conditions.'''
# Note: we can't unsubscribe from the server, so if we receive
# subsequent notifications, they will be safely ignored as
# no callbacks will exist to process them. For subscriptions we will
# however cache the 'result' hash and feed it back in case a wallet that
# was closed gets reopened (self.sub_cache).
ct = 0
with self.lock:
for k,v in self.subscriptions.copy().items():
if callback in v:
v.remove(callback)
if not v:
# remove empty list
self.subscriptions.pop(k, None)
ct += 1
ct2 = self._cancel_pending_sends(callback)
if ct or ct2:
qname = getattr(callback, '__qualname__', '<unknown>')
self.print_error("Removed {} subscription callbacks and {} pending sends for callback: {}".format(ct, ct2, qname))
def cancel_requests(self, callback):
'''Remove a callback to free object references to enable GC.
It is advised that this function only be called from the network thread
to avoid race conditions.'''
# If the interface ends up answering these requests, they will just
# be safely ignored. This is better than the alternative which is to
# keep references to an object that declared itself defunct.
ct = 0
for message_id, client_req in self.unanswered_requests.copy().items():
if callback == client_req[2]:
self.unanswered_requests.pop(message_id, None) # guard against race conditions here. Note: this usually is called from the network thread but who knows what future programmers may do. :)
ct += 1
ct2 = self._cancel_pending_sends(callback)
if ct or ct2:
qname = getattr(callback, '__qualname__', repr(callback))
self.print_error("Removed {} unanswered client requests and {} pending sends for callback: {}".format(ct, ct2, qname))
def connection_down(self, server, blacklist=False):
'''A connection to server either went down, or was never made.
We distinguish by whether it is in self.interfaces.'''
if blacklist:
self.server_set_blacklisted(server, True, save=True, skip_connection_logic=True)
else:
self.disconnected_servers.add(server)
if server == self.default_server:
self.set_status('disconnected')
if server in self.interfaces:
self.close_interface(self.interfaces[server])
self.notify('interfaces')
for b in self.blockchains.values():
if b.catch_up == server:
b.catch_up = None
def new_interface(self, server_key, socket):
self.add_recent_server(server_key)
interface = Interface(server_key, socket, max_message_bytes=self.MAX_MESSAGE_BYTES, config=self.config)
interface.blockchain = None
interface.tip_header = None
interface.tip = 0
interface.set_mode(Interface.MODE_VERIFICATION)
with self.interface_lock:
self.interfaces[server_key] = interface
# server.version should be the first message
params = [version.PACKAGE_VERSION, version.PROTOCOL_VERSION]
self.queue_request('server.version', params, interface)
# The interface will immediately respond with its last known header.
self.queue_request('blockchain.headers.subscribe', [], interface)
if server_key == self.default_server:
self.switch_to_interface(server_key, self.SWITCH_DEFAULT)
def maintain_sockets(self):
'''Socket maintenance.'''
# Responses to connection attempts?
while not self.socket_queue.empty():
server, socket = self.socket_queue.get()
if server in self.connecting:
self.connecting.remove(server)
if socket:
self.remove_bad_certificate(server)
self.new_interface(server, socket)
else:
self.connection_down(server)
# Send pings and shut down stale interfaces
# must use copy of values
with self.interface_lock:
interfaces = list(self.interfaces.values())
for interface in interfaces:
if interface.has_timed_out():
self.connection_down(interface.server)
elif interface.ping_required():
self.queue_request('server.ping', [], interface)
now = time.time()
# nodes
with self.interface_lock:
server_count = len(self.interfaces) + len(self.connecting)
if server_count < self.num_server:
self.start_random_interface()
if now - self.nodes_retry_time > self.NODES_RETRY_INTERVAL:
self.print_error('network: retrying connections')
self.disconnected_servers = set([])
self.nodes_retry_time = now
# main interface
with self.interface_lock:
if not self.is_connected():
if self.auto_connect:
if not self.is_connecting():
self.switch_to_random_interface()
else:
if self.default_server in self.disconnected_servers:
if now - self.server_retry_time > self.SERVER_RETRY_INTERVAL:
self.disconnected_servers.remove(self.default_server)
self.server_retry_time = now
else:
self.switch_to_interface(self.default_server, self.SWITCH_SOCKET_LOOP)
def request_chunk(self, interface, chunk_index):
if chunk_index in self.requested_chunks:
return False
self.requested_chunks.add(chunk_index)
interface.print_error("requesting chunk {}".format(chunk_index))
chunk_base_height = chunk_index * 2016
chunk_count = 2016
return self.request_headers(interface, chunk_base_height, chunk_count, silent=True)
def request_headers(self, interface, base_height, count, silent=False):
if not silent:
interface.print_error("requesting multiple consecutive headers, from {} count {}".format(base_height, count))
if count > 2016:
raise Exception("Server does not support requesting more than 2016 consecutive headers")
top_height = base_height + count - 1
if top_height > networks.net.VERIFICATION_BLOCK_HEIGHT:
if base_height < networks.net.VERIFICATION_BLOCK_HEIGHT:
# As part of the verification process, we fetched the set of headers that allowed manual verification of the post-checkpoint headers that were fetched
# as part of the "catch-up" process. This requested header batch overlaps the checkpoint, so we know we have the post-checkpoint segment from the
# "catch-up". This leaves us needing some header preceding the checkpoint, and we can clip the batch to the checkpoint to ensure we can verify the
# fetched batch, which we wouldn't otherwise be able to do manually as we cannot guarantee we have the headers preceding the batch.
interface.print_error("clipping request across checkpoint height {} ({} -> {})".format(networks.net.VERIFICATION_BLOCK_HEIGHT, base_height, top_height))
verified_count = networks.net.VERIFICATION_BLOCK_HEIGHT - base_height + 1
return self._request_headers(interface, base_height, verified_count, networks.net.VERIFICATION_BLOCK_HEIGHT)
else:
return self._request_headers(interface, base_height, count)
else:
return self._request_headers(interface, base_height, count, networks.net.VERIFICATION_BLOCK_HEIGHT)
def _request_headers(self, interface, base_height, count, checkpoint_height=0):
params = [base_height, count, checkpoint_height]
return self.queue_request('blockchain.block.headers', params, interface) is not None
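# Illustrative only (hedged): request_chunk(interface, 3) asks for headers
# 6048..8063 by queueing 'blockchain.block.headers' with params
# [6048, 2016, <checkpoint height or 0>] through the helpers above.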
def on_block_headers(self, interface, request, response):
'''Handle receiving a chunk of block headers'''
error = response.get('error')
result = response.get('result')
params = response.get('params')
if not request or result is None or params is None or error is not None:
interface.print_error(error or 'bad response')
# Ensure the chunk can be rerequested, but only if the request originated from us.
if request and request[1][0] // 2016 in self.requested_chunks:
self.requested_chunks.remove(request[1][0] // 2016)
return
# Ignore unsolicited chunks
request_params = request[1]
request_base_height = request_params[0]
expected_header_count = request_params[1]
index = request_base_height // 2016
if request_params != params:
interface.print_error("unsolicited chunk base_height={} count={}".format(request_base_height, expected_header_count))
return
if index in self.requested_chunks:
self.requested_chunks.remove(index)
header_hexsize = blockchain.HEADER_SIZE * 2
hexdata = result['hex']
actual_header_count = len(hexdata) // header_hexsize
# We accept fewer headers than we asked for, to cover the case where the distance to the tip was unknown.
if actual_header_count > expected_header_count:
interface.print_error("chunk data size incorrect expected_size={} actual_size={}".format(expected_header_count * header_hexsize, len(hexdata)))
return
proof_was_provided = False
if 'root' in result and 'branch' in result:
header_height = request_base_height + actual_header_count - 1
header_offset = (actual_header_count - 1) * header_hexsize
header = hexdata[header_offset : header_offset + header_hexsize]
if not self.validate_checkpoint_result(interface, result["root"], result["branch"], header, header_height):
# Got checkpoint validation data, server failed to provide proof.
interface.print_error("disconnecting server for incorrect checkpoint proof")
self.connection_down(interface.server, blacklist=True)
return
data = bytes.fromhex(hexdata)
try:
blockchain.verify_proven_chunk(request_base_height, data)
except blockchain.VerifyError as e:
interface.print_error('disconnecting server for failed verify_proven_chunk: {}'.format(e))
self.connection_down(interface.server, blacklist=True)
return
proof_was_provided = True
elif len(request_params) == 3 and request_params[2] != 0:
# Expected checkpoint validation data, did not receive it.
self.connection_down(interface.server)
return
verification_top_height = self.checkpoint_servers_verified.get(interface.server, {}).get('height', None)
was_verification_request = verification_top_height and request_base_height == verification_top_height - 147 + 1 and actual_header_count == 147
initial_interface_mode = interface.mode
if interface.mode == Interface.MODE_VERIFICATION:
if not was_verification_request:
interface.print_error("disconnecting unverified server for sending unrelated header chunk")
self.connection_down(interface.server, blacklist=True)
return
if not proof_was_provided:
interface.print_error("disconnecting unverified server for sending verification header chunk without proof")
self.connection_down(interface.server, blacklist=True)
return
if not self.apply_successful_verification(interface, request_params[2], result['root']):
return
# We connect this verification chunk into the longest chain.
target_blockchain = self.blockchains[0]
else:
target_blockchain = interface.blockchain
chunk_data = bytes.fromhex(hexdata)
connect_state = (target_blockchain.connect_chunk(request_base_height, chunk_data, proof_was_provided)
if target_blockchain
else blockchain.CHUNK_BAD) # fix #1079 -- invariant is violated here due to extant bugs, so rather than raise an exception, just trigger a connection_down below...
if connect_state == blockchain.CHUNK_ACCEPTED:
interface.print_error("connected chunk, height={} count={} proof_was_provided={}".format(request_base_height, actual_header_count, proof_was_provided))
elif connect_state == blockchain.CHUNK_FORKS:
interface.print_error("identified forking chunk, height={} count={}".format(request_base_height, actual_header_count))
# We actually have all the headers up to the bad point. In theory we
# can use them to detect a fork point in some cases. But that's bonus
# work for someone later.
# Discard the chunk and do a normal search for the fork point.
# Note that this will not give us the right blockchain, the
# syncing does not work that way historically. That might
# wait until either a new block appears, or
if False:
interface.blockchain = None
interface.set_mode(Interface.MODE_BACKWARD)
interface.bad = request_base_height + actual_header_count - 1
interface.bad_header = blockchain.HeaderChunk(request_base_height, chunk_data).get_header_at_height(interface.bad)
self.request_header(interface, min(interface.tip, interface.bad - 1))
return
else:
interface.print_error("discarded bad chunk, height={} count={} reason={}".format(request_base_height, actual_header_count, connect_state))
self.connection_down(interface.server)
return
# This interface was verified above. Get it syncing.
if initial_interface_mode == Interface.MODE_VERIFICATION:
self._process_latest_tip(interface)
return
# If not finished, get the next chunk.
if proof_was_provided and not was_verification_request:
# the verifier must have asked for this chunk. It has been overlaid into the file.
pass
else:
if interface.blockchain.height() < interface.tip:
self.request_headers(interface, request_base_height + actual_header_count, 2016)
else:
interface.set_mode(Interface.MODE_DEFAULT)
interface.print_error('catch up done', interface.blockchain.height())
interface.blockchain.catch_up = None
self.notify('blockchain_updated')
def request_header(self, interface, height):
"""
This works for all modes except for 'default'.
If it is to be used for piecemeal filling of the sparse blockchain
headers file before the checkpoint height, it needs extra
handling for the 'default' mode.
A server interface does not get associated with a blockchain
until it gets handled in the response to its first header
request.
"""
interface.print_error(f"requesting header {height}")
if height > networks.net.VERIFICATION_BLOCK_HEIGHT:
params = [height]
else:
params = [height, networks.net.VERIFICATION_BLOCK_HEIGHT]
self.queue_request('blockchain.block.header', params, interface)
return True
def on_header(self, interface, request, response):
"""Handle receiving a single block header"""
result = response.get('result')
if not result:
interface.print_error(response)
self.connection_down(interface.server)
return
if not request:
interface.print_error("disconnecting server for sending unsolicited header, no request, params={}".format(response['params']), blacklist=True)
self.connection_down(interface.server)
return
request_params = request[1]
height = request_params[0]
response_height = response['params'][0]
# This check can be removed if request/response params are reconciled in some sort of rewrite.
if height != response_height:
interface.print_error("unsolicited header request={} request_height={} response_height={}".format(request_params, height, response_height))
self.connection_down(interface.server)
return
proof_was_provided = False
hexheader = None
if 'root' in result and 'branch' in result and 'header' in result:
hexheader = result["header"]
if not self.validate_checkpoint_result(interface, result["root"], result["branch"], hexheader, height):
# Got checkpoint validation data, failed to provide proof.
interface.print_error("unprovable header request={} height={}".format(request_params, height))
self.connection_down(interface.server)
return
proof_was_provided = True
else:
hexheader = result
# Simple header request.
header = blockchain.deserialize_header(bytes.fromhex(hexheader), height)
# Is there a blockchain that already includes this header?
chain = blockchain.check_header(header)
if interface.mode == Interface.MODE_BACKWARD:
if chain:
interface.print_error("binary search")
interface.set_mode(Interface.MODE_BINARY)
interface.blockchain = chain
interface.good = height
next_height = (interface.bad + interface.good) // 2
else:
# A backwards header request should not happen before the
# checkpoint height. It isn't requested in this context, and it
# isn't requested anywhere else. If this happens it is an error.
# Additionally, if the checkpoint height header was requested
# and it does not connect, then there's not much Electron Cash
# can do about it (that we're going to bother). We depend on the
# checkpoint being relevant for the blockchain the user is
# running against.
if height <= networks.net.VERIFICATION_BLOCK_HEIGHT:
self.connection_down(interface.server)
next_height = None
else:
interface.bad = height
interface.bad_header = header
delta = interface.tip - height
# If the longest chain does not connect at any point we check to the chain this interface is
# serving, then we fall back on the checkpoint height which is expected to work.
next_height = max(networks.net.VERIFICATION_BLOCK_HEIGHT, interface.tip - 2 * delta)
elif interface.mode == Interface.MODE_BINARY:
if chain:
interface.good = height
interface.blockchain = chain
else:
interface.bad = height
interface.bad_header = header
if interface.bad != interface.good + 1:
next_height = (interface.bad + interface.good) // 2
elif not interface.blockchain.can_connect(interface.bad_header, check_height=False):
self.connection_down(interface.server)
next_height = None
else:
branch = self.blockchains.get(interface.bad)
if branch is not None:
if branch.check_header(interface.bad_header):
interface.print_error('joining chain', interface.bad)
next_height = None
elif branch.parent().check_header(header):
interface.print_error('reorg', interface.bad, interface.tip)
interface.blockchain = branch.parent()
next_height = None
else:
interface.print_error('checkpoint conflicts with existing fork', branch.path())
branch.write(b'', 0)
branch.save_header(interface.bad_header)
interface.set_mode(Interface.MODE_CATCH_UP)
interface.blockchain = branch
next_height = interface.bad + 1
interface.blockchain.catch_up = interface.server
else:
bh = interface.blockchain.height()
next_height = None
if bh > interface.good:
if not interface.blockchain.check_header(interface.bad_header):
b = interface.blockchain.fork(interface.bad_header)
self.blockchains[interface.bad] = b
interface.blockchain = b
interface.print_error("new chain", b.base_height)
interface.set_mode(Interface.MODE_CATCH_UP)
next_height = interface.bad + 1
interface.blockchain.catch_up = interface.server
else:
assert bh == interface.good
if interface.blockchain.catch_up is None and bh < interface.tip:
interface.print_error("catching up from %d"% (bh + 1))
interface.set_mode(Interface.MODE_CATCH_UP)
next_height = bh + 1
interface.blockchain.catch_up = interface.server
self.notify('blockchain_updated')
elif interface.mode == Interface.MODE_CATCH_UP:
can_connect = interface.blockchain.can_connect(header)
if can_connect:
interface.blockchain.save_header(header)
next_height = height + 1 if height < interface.tip else None
else:
# go back
interface.print_error("cannot connect", height)
interface.set_mode(Interface.MODE_BACKWARD)
interface.bad = height
interface.bad_header = header
next_height = height - 1
if next_height is None:
# exit catch_up state
interface.print_error('catch up done', interface.blockchain.height())
interface.blockchain.catch_up = None
self.switch_lagging_interface()
self.notify('blockchain_updated')
elif interface.mode == Interface.MODE_DEFAULT:
interface.print_error("ignored header {} received in default mode".format(height))
return
# If not finished, get the next header
if next_height:
if interface.mode == Interface.MODE_CATCH_UP and interface.tip > next_height:
self.request_headers(interface, next_height, 2016)
else:
self.request_header(interface, next_height)
else:
interface.set_mode(Interface.MODE_DEFAULT)
self.notify('blockchain_updated')
# refresh network dialog
self.notify('interfaces')
def find_bad_fds_and_kill(self):
bad = []
with self.interface_lock:
for s,i in self.interfaces.copy().items():
try:
r, w, x = select.select([i],[i],[],0) # non-blocking select to test if fd's are good.
except (OSError, ValueError):
i.print_error("Bad file descriptor {}, closing".format(i.fileno()))
self.connection_down(s)
bad.append(i)
if bad:
self.print_error("{} bad file descriptors detected and shut down: {}".format(len(bad), bad))
return bad
def wait_on_sockets(self):
def try_to_recover(err):
self.print_error("wait_on_sockets: {} raised by select() call.. trying to recover...".format(err))
self.find_bad_fds_and_kill()
rin = []
win = []
r_immed = []
with self.interface_lock:
interfaces = list(self.interfaces.values())
for interface in interfaces:
if interface.fileno() < 0:
continue
read_pending, write_pending = interface.pipe.get_selectloop_info()
if read_pending:
r_immed.append(interface)
else:
rin.append(interface)
if write_pending or interface.num_requests():
win.append(interface)
timeout = 0 if r_immed else 0.1
try:
# Python docs say Windows doesn't like empty selects.
if win or rin:
rout, wout, xout = select.select(rin, win, [], timeout)
else:
rout = wout = xout = ()
if timeout:
# Sleep to prevent busy looping
time.sleep(timeout)
except socket.error as e:
code = None
if isinstance(e, OSError): # Should always be the case unless ancient python3
code = e.errno
if code == errno.EINTR:
return # calling loop will try again later
elif code == errno.EBADF:
# A filedescriptor was closed from underneath us because we have race conditions in this class. :(
# Note that due to race conditions with the gui thread even with the checks above it's entirely possible
# for the socket fd to become -1, or to be not -1 but still be invalid/closed.
try_to_recover("EBADF")
return # calling loop will try again later
raise # ruh ruh. user will get a crash dialog screen and network will die. FIXME: figure out a way to restart network..
except ValueError:
# Note sometimes select() ends up getting a file descriptor that's -1 because race conditions, in which case it raises
# ValueError
try_to_recover("ValueError")
return # calling loop will try again later
assert not xout
for interface in wout:
if not interface.send_requests():
self.connection_down(interface.server)
for interface in r_immed:
self.process_responses(interface)
for interface in rout:
self.process_responses(interface)
def init_headers_file(self):
b = self.blockchains[0]
filename = b.path()
# NB: HEADER_SIZE = 80 bytes
length = blockchain.HEADER_SIZE * (networks.net.VERIFICATION_BLOCK_HEIGHT + 1)
if not os.path.exists(filename) or os.path.getsize(filename) < length:
with open(filename, 'wb') as f:
if length>0:
f.seek(length-1)
f.write(b'\x00')
util.ensure_sparse_file(filename)
with b.lock:
b.update_size()
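        # Rough size illustration (the checkpoint height here is hypothetical): with
        # VERIFICATION_BLOCK_HEIGHT = 600000 the pre-allocated file is
        # 80 * 600001 bytes, about 48 MB; ensure_sparse_file() keeps actual disk
        # usage small until real headers overwrite the zero-filled region.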
def run(self):
b = self.blockchains[0]
header = None
if networks.net.VERIFICATION_BLOCK_HEIGHT is not None:
self.init_headers_file()
header = b.read_header(networks.net.VERIFICATION_BLOCK_HEIGHT)
if header is not None:
self.verified_checkpoint = True
while self.is_running():
self.maintain_sockets()
self.wait_on_sockets()
if self.verified_checkpoint:
self.run_jobs() # Synchronizer and Verifier and Fx
self.process_pending_sends()
self.stop_network()
self.tor_controller.active_port_changed.remove(self.on_tor_port_changed)
self.tor_controller.stop()
self.tor_controller = None
self.on_stop()
def on_server_version(self, interface, version_data):
interface.server_version = version_data
def on_notify_header(self, interface, header_dict):
"""
When we subscribe for 'blockchain.headers.subscribe', a server will send
        us its topmost header. After that, it will forward on any additional
headers as it receives them.
"""
if (not isinstance(header_dict, dict)
or 'hex' not in header_dict or 'height' not in header_dict):
# bad and/or unexpected response from server.
self.connection_down(interface.server)
return
header_hex = header_dict['hex']
height = header_dict['height']
header = blockchain.deserialize_header(bytes.fromhex(header_hex),
height)
# If the server is behind the verification height, then something is wrong with it. Drop it.
if networks.net.VERIFICATION_BLOCK_HEIGHT is not None and height <= networks.net.VERIFICATION_BLOCK_HEIGHT:
self.connection_down(interface.server)
return
# We will always update the tip for the server.
interface.tip_header = header
interface.tip = height
if interface.mode == Interface.MODE_VERIFICATION:
# If the server has already had this requested, this will be a no-op.
self.request_initial_proof_and_headers(interface)
return
self._process_latest_tip(interface)
def _process_latest_tip(self, interface):
if interface.mode != Interface.MODE_DEFAULT:
return
header = interface.tip_header
height = interface.tip
b = blockchain.check_header(header) # Does it match the hash of a known header.
if b:
interface.blockchain = b
self.switch_lagging_interface()
self.notify('blockchain_updated')
self.notify('interfaces')
return
b = blockchain.can_connect(header) # Is it the next header on a given blockchain.
if b:
interface.blockchain = b
b.save_header(header)
self.switch_lagging_interface()
self.notify('blockchain_updated')
self.notify('interfaces')
return
heights = [x.height() for x in self.blockchains.values()]
tip = max(heights)
if tip > networks.net.VERIFICATION_BLOCK_HEIGHT:
interface.print_error("attempt to reconcile longest chain tip={} heights={}".format(tip, heights))
interface.set_mode(Interface.MODE_BACKWARD)
interface.bad = height
interface.bad_header = header
self.request_header(interface, min(tip, height - 1))
else:
interface.print_error("attempt to catch up tip={} heights={}".format(tip, heights))
chain = self.blockchains[0]
if chain.catch_up is None:
chain.catch_up = interface
interface.set_mode(Interface.MODE_CATCH_UP)
interface.blockchain = chain
interface.print_error("switching to catchup mode", tip)
self.request_header(interface, networks.net.VERIFICATION_BLOCK_HEIGHT + 1)
else:
interface.print_error("chain already catching up with", chain.catch_up.server)
def request_initial_proof_and_headers(self, interface):
# This will be the initial topmost header response. But we might get new blocks.
if interface.server not in self.checkpoint_servers_verified:
interface.print_error("request_initial_proof_and_headers pending")
top_height = self.checkpoint_height
# If there is no known checkpoint height for this network, we look to get
# a given number of confirmations for the same conservative height.
if self.checkpoint_height is None:
self.checkpoint_height = interface.tip - 100
self.checkpoint_servers_verified[interface.server] = { 'root': None, 'height': self.checkpoint_height }
# We need at least 147 headers before the post checkpoint headers for daa calculations.
self._request_headers(interface, self.checkpoint_height - 147 + 1, 147, self.checkpoint_height)
else:
# We already have them verified, maybe we got disconnected.
interface.print_error("request_initial_proof_and_headers bypassed")
interface.set_mode(Interface.MODE_DEFAULT)
self._process_latest_tip(interface)
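    # Note on the 147-header request above: this is presumably the 144-block
    # difficulty-adjustment work window plus a 3-block median-time selection at
    # its boundary (an assumption; the exact rationale is not stated in the code).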
def apply_successful_verification(self, interface, checkpoint_height, checkpoint_root):
known_roots = [ v['root'] for v in self.checkpoint_servers_verified.values() if v['root'] is not None ]
if len(known_roots) > 0 and checkpoint_root != known_roots[0]:
interface.print_error("server sent inconsistent root '{}'".format(checkpoint_root))
self.connection_down(interface.server)
return False
self.checkpoint_servers_verified[interface.server]['root'] = checkpoint_root
# rt12 --- checkpoint generation currently disabled.
if False:
interface.print_error("received verification {}".format(self.verifications_required))
self.verifications_required -= 1
if self.verifications_required > 0:
return False
if networks.net.VERIFICATION_BLOCK_HEIGHT is None:
networks.net.VERIFICATION_BLOCK_HEIGHT = checkpoint_height
networks.net.VERIFICATION_BLOCK_MERKLE_ROOT = checkpoint_root
network_name = "TESTNET" if networks.net.TESTNET else "MAINNET"
self.print_error("found verified checkpoint for {} at height {} with merkle root {!r}".format(network_name, checkpoint_height, checkpoint_root))
if not self.verified_checkpoint:
self.init_headers_file()
self.verified_checkpoint = True
# rt12 --- checkpoint generation currently disabled.
if False:
with self.interface_lock:
interfaces = list(self.interfaces.values())
for interface_entry in interfaces:
interface_entry.blockchain = self.blockchains[0]
interface_entry.set_mode(Interface.MODE_DEFAULT)
interface.print_error("server was verified correctly")
interface.set_mode(Interface.MODE_DEFAULT)
return True
def validate_checkpoint_result(self, interface, merkle_root, merkle_branch, header, header_height):
"""
header: hex representation of the block header.
merkle_root: hex representation of the server's calculated merkle root.
        merkle_branch: list of hex representations of the server's calculated merkle branch hashes.
Returns a boolean to represent whether the server's proof is correct.
"""
received_merkle_root = bytes(reversed(bytes.fromhex(merkle_root)))
if networks.net.VERIFICATION_BLOCK_MERKLE_ROOT:
expected_merkle_root = bytes(reversed(
bytes.fromhex(networks.net.VERIFICATION_BLOCK_MERKLE_ROOT)))
else:
expected_merkle_root = received_merkle_root
if received_merkle_root != expected_merkle_root:
interface.print_error("Sent unexpected merkle root, expected: {}, got: {}".format(networks.net.VERIFICATION_BLOCK_MERKLE_ROOT, merkle_root))
return False
header_hash = bitcoin.Hash(bytes.fromhex(header))
byte_branches = [ bytes(reversed(bytes.fromhex(v))) for v in merkle_branch ]
proven_merkle_root = blockchain.root_from_proof(header_hash, byte_branches, header_height)
if proven_merkle_root != expected_merkle_root:
interface.print_error("Sent incorrect merkle branch, expected: {}, proved: {}".format(networks.net.VERIFICATION_BLOCK_MERKLE_ROOT, util.hfu(reversed(proven_merkle_root))))
return False
return True
def blockchain(self):
with self.interface_lock:
if self.interface and self.interface.blockchain is not None:
self.blockchain_index = self.interface.blockchain.base_height
return self.blockchains[self.blockchain_index]
def get_blockchains(self):
out = {}
for k, b in self.blockchains.items():
r = list(filter(lambda i: i.blockchain==b, list(self.interfaces.values())))
if r:
out[k] = r
return out
def follow_chain(self, index):
blockchain = self.blockchains.get(index)
if blockchain:
self.blockchain_index = index
self.config.set_key('blockchain_index', index)
with self.interface_lock:
interfaces = list(self.interfaces.values())
for i in interfaces:
if i.blockchain == blockchain:
self.switch_to_interface(i.server, self.SWITCH_FOLLOW_CHAIN)
break
else:
raise BaseException('blockchain not found', index)
with self.interface_lock:
if self.interface:
server = self.interface.server
host, port, protocol, proxy, auto_connect = self.get_parameters()
host, port, protocol = server.split(':')
self.set_parameters(host, port, protocol, proxy, auto_connect)
def get_local_height(self):
return self.blockchain().height()
def synchronous_get(self, request, timeout=30):
q = queue.Queue()
self.send([request], q.put)
try:
r = q.get(True, timeout)
except queue.Empty:
raise util.TimeoutException('Server did not answer')
if r.get('error'):
raise util.ServerError(r.get('error'))
return r.get('result')
def get_raw_tx_for_txid(self, txid, timeout=30):
""" Used by UI code to retrieve a transaction from the blockchain by
txid. (Qt Gui: Tools -> Load transaction -> From the blockchain)
param: txid, a transaction hash
returns: tuple(True, raw_tx) on success
tuple(False, error_msg) on failure.
error_msg is suitable to be displayed in a UI as it is not
a server string, but rather an error based on what the server
                 replied with (with a generic fallback message used
if the server message is not recognized). """
txid = str(txid).strip()
try:
r = self.synchronous_get(('blockchain.transaction.get',[txid]), timeout=timeout)
return True, r
except BaseException as e:
self.print_error("Exception retrieving transaction for '{}': {}".format(txid, repr(e)))
msg = str(e).lower().strip()
if 'should be a transaction hash' in msg:
msg = _("Input data is not a transaction hash.")
elif 'still in the process of being indexed' in msg:
msg = _("This server is still indexing transactions. You should switch to another server for now.")
elif 'no such' in msg:
msg = _("No such mempool or blockchain transaction exists.")
elif 'did not answer' in msg:
msg = _("The server did not answer; network may be down.")
else:
# fall back to something generic.
msg = _("Could not retrieve transaction for the specified hash.")
return False, msg
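    # Illustrative call (the txid below is a placeholder, not a real hash):
    #   ok, res = network.get_raw_tx_for_txid("00" * 32)
    #   if ok:  raw_tx_hex = res
    #   else:   show_error(res)   # res is already a user-presentable message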
@staticmethod
def __wait_for(it, timeout=30):
"""Wait for the result of calling lambda `it`.
Will raise util.TimeoutException or util.ServerErrorResponse on failure."""
q = queue.Queue()
it(q.put)
try:
result = q.get(block=True, timeout=(timeout or 0.010)) # does not support non-blocking
except queue.Empty:
raise util.TimeoutException(_('Server did not answer'))
if result.get('error'):
raise util.ServerErrorResponse(_("Server returned an error response"), result.get('error'))
return result.get('result')
@staticmethod
def __with_default_synchronous_callback(invocation, callback):
""" Use this method if you want to make the network request
synchronous. """
if not callback:
return Network.__wait_for(invocation)
invocation(callback)
def broadcast_transaction(self, transaction, callback=None):
""" This is the legacy EC/Electrum API that we still need to support
for plugins and other code, but it has been improved to not allow for
phishing attacks by calling broadcast_transaction2 which actually
deduces a more intelligent and phishing-proof error message.
If you want the actual server response, use broadcast_transaction2 and
catch exceptions. """
if callback:
command = 'blockchain.transaction.broadcast'
self.send([(command, [str(transaction)])], callback)
return
try:
out = self.broadcast_transaction2(transaction)
except BaseException as e: #catch-all. May be util.TimeoutException, util.ServerError subclass or other.
return False, "error: " + str(e) # Ergh. To remain compatible with old code we prepend this ugly "error: "
return True, out
def broadcast_transaction2(self, transaction, timeout=30):
""" Very similar to broadcast_transation() but it actually tells calling
code what the nature of the error was in a more explicit manner by
raising an Exception. Normally a util.TimeoutException,
        util.TxHashMismatch, or util.ServerErrorResponse is raised on broadcast
error or warning. TxHashMismatch indicates the broadcast succeeded
but that the tx hash returned by the server does not match the tx hash
of the specified transaction. All other exceptions indicate no broadcast
has successfully occurred.
Does not support using a callback function."""
command = 'blockchain.transaction.broadcast'
invocation = lambda c: self.send([(command, [str(transaction)])], c)
try:
out = Network.__wait_for(invocation, timeout=timeout) # may raise util.TimeoutException, util.ServerErrorResponse
except util.ServerErrorResponse as e:
# rephrase the generic message to something more suitable
self.print_error("Server error response was:", str(e.server_msg))
raise util.ServerErrorResponse(Network.transmogrify_broadcast_response_for_gui(e.server_msg), e.server_msg)
if out != transaction.txid():
self.print_error("Server replied with a mismatching txid:", str(out))
raise util.TxHashMismatch(_("Server response does not match signed transaction ID."), str(out))
return out
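    # Sketch of the two broadcast APIs (tx is assumed to be a signed Transaction):
    #   ok, msg = network.broadcast_transaction(tx)      # legacy tuple-returning style
    #   try:
    #       txid = network.broadcast_transaction2(tx)    # exception-raising style
    #   except (util.TimeoutException, util.TxHashMismatch,
    #           util.ServerErrorResponse) as e:
    #       handle_failure(str(e))                       # handle_failure is hypothetical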
@staticmethod
def transmogrify_broadcast_response_for_gui(server_msg):
# NB: the server_msg is usually a dict but not always.
# Unfortunately, ElectrumX doesn't return a good error code. It's always '1'.
# So, we must use substring matching to grok the error message.
# We do NOT ever want to print to the user the server message as this has potential for a phishing exploit.
# See: https://github.com/spesmilo/electrum/issues/4968
# So.. these messages mostly come from groking the source code of BU and Bitcoin ABC. If that fails,
# a generic error string is returned.
if not isinstance(server_msg, str):
server_msg = str(server_msg)
server_msg = server_msg.replace("\n", r"\n") # replace \n with slash-n because dict does this.
if r'dust' in server_msg:
dust_thold = 546
try:
from .wallet import dust_threshold
dust_thold = dust_threshold(Network.get_instance())
except: pass
return _("Transaction could not be broadcast due to dust outputs (dust threshold is {} satoshis).").format(dust_thold)
elif r'Missing inputs' in server_msg or r'Inputs unavailable' in server_msg or r"bad-txns-inputs-spent" in server_msg or r"bad-txns-inputs-missingorspent" in server_msg:
return _("Transaction could not be broadcast due to missing, already-spent, or otherwise invalid inputs.")
elif r"transaction already in block chain" in server_msg:
# We get this message whenever any of this transaction's outputs are already in confirmed utxo set (and are unspent).
# For confirmed txn with all outputs already spent, we will see "missing inputs" instead.
return _("The transaction already exists in the blockchain.")
elif r'insufficient priority' in server_msg or r'rate limited free transaction' in server_msg or r'min relay fee not met' in server_msg:
return _("The transaction was rejected due to paying insufficient fees.")
elif r'mempool min fee not met' in server_msg or r"mempool full" in server_msg:
return _("The transaction was rejected due to paying insufficient fees (possibly due to network congestion).")
elif r'bad-txns-premature-spend-of-coinbase' in server_msg:
return _("Transaction could not be broadcast due to an attempt to spend a coinbase input before maturity.")
elif r"txn-already-in-mempool" in server_msg or r"txn-already-known" in server_msg:
return _("The transaction already exists in the server's mempool.")
elif r"txn-mempool-conflict" in server_msg:
return _("The transaction conflicts with a transaction already in the server's mempool.")
elif r'too-long-mempool-chain' in server_msg:
return _("The transaction was rejected due to having too many mempool ancestors. Wait for confirmations and try again.")
elif r"bad-txns-nonstandard-inputs" in server_msg:
return _("The transaction was rejected due to its use of non-standard inputs.")
elif r"absurdly-high-fee" in server_msg:
return _("The transaction was rejected because it specifies an absurdly high fee.")
elif r"non-mandatory-script-verify-flag" in server_msg or r"mandatory-script-verify-flag-failed" in server_msg or r"upgrade-conditional-script-failure" in server_msg:
return _("The transaction was rejected due to an error in script execution.")
elif r"tx-size" in server_msg or r"bad-txns-oversize" in server_msg:
return _("The transaction was rejected because it is too large (in bytes).")
elif r"scriptsig-size" in server_msg:
return _("The transaction was rejected because it contains a script that is too large.")
elif r"scriptpubkey" in server_msg:
return _("The transaction was rejected because it contains a non-standard output script.")
elif r"bare-multisig" in server_msg:
return _("The transaction was rejected because it contains a bare multisig output.")
elif r"multi-op-return" in server_msg:
return _("The transaction was rejected because it contains multiple OP_RETURN outputs.")
elif r"scriptsig-not-pushonly" in server_msg:
return _("The transaction was rejected because it contains non-push-only script sigs.")
elif r'bad-txns-nonfinal' in server_msg or r'non-BIP68-final' in server_msg:
return _("The transaction was rejected because it is not considered final according to network rules.")
elif r"bad-txns-too-many-sigops" in server_msg or r"bad-txn-sigops" in server_msg:
# std limit is 4000; this is basically impossible to reach on mainnet using normal txes, due to the 100kB size limit.
return _("The transaction was rejected because it contains too many signature-check opcodes.")
elif r"bad-txns-inputvalues-outofrange" in server_msg or r"bad-txns-vout-negative" in server_msg or r"bad-txns-vout-toolarge" in server_msg or r"bad-txns-txouttotal-toolarge" in server_msg:
return _("The transaction was rejected because its amounts are out of range.")
elif r"bad-txns-in-belowout" in server_msg or r"bad-txns-fee-outofrange" in server_msg:
return _("The transaction was rejected because it pays a negative or huge fee.")
elif r"bad-tx-coinbase" in server_msg:
return _("The transaction was rejected because it is a coinbase transaction.")
elif r"bad-txns-prevout-null" in server_msg or r"bad-txns-inputs-duplicate" in server_msg:
return _("The transaction was rejected because it contains null or duplicate inputs.")
elif r"bad-txns-vin-empty" in server_msg or r"bad-txns-vout-empty" in server_msg:
return _("The transaction was rejected because it is has no inputs or no outputs.")
elif r"bad-txns-undersize" in server_msg:
return _("The transaction was rejected because it is too small.")
elif r'version' in server_msg:
return _("The transaction was rejected because it uses a non-standard version.")
elif r'TX decode failed' in server_msg:
return _("The transaction could not be decoded.")
return _("An error occurred broadcasting the transaction")
# Used by the verifier job.
def get_merkle_for_transaction(self, tx_hash, tx_height, callback, max_qlen=10):
""" Asynchronously enqueue a request for a merkle proof for a tx.
Note that the callback param is required.
May return None if too many requests were enqueued (max_qlen) or
if there is no interface.
Client code should handle the None return case appropriately. """
return self.queue_request('blockchain.transaction.get_merkle',
[tx_hash, tx_height],
callback=callback, max_qlen=max_qlen)
def get_proxies(self):
""" Returns a proxies dictionary suitable to be passed to the requests
module, or None if no proxy is set for this instance. """
proxy = self.proxy and self.proxy.copy() # retain a copy in case another thread messes with it
if proxy:
pre = ''
# proxies format for requests lib is eg:
# {
# 'http' : 'socks[45]://user:password@host:port',
# 'https' : 'socks[45]://user:password@host:port'
# }
# with user:password@ being omitted if no user/password.
if proxy.get('user') and proxy.get('password'):
pre = '{}:{}@'.format(proxy.get('user'), proxy.get('password'))
mode = proxy.get('mode')
if mode and mode.lower() == "socks5":
mode += 'h' # socks5 with hostname resolution on the server side so it works with tor & even onion!
socks = '{}://{}{}:{}'.format(mode, pre, proxy.get('host'), proxy.get('port'))
proxies = { # transform it to requests format
'http' : socks,
'https' : socks
}
return proxies
return None
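    # Illustrative use of the returned dict with the requests library (the URL is a
    # placeholder):
    #   proxies = network.get_proxies()
    #   requests.get("https://example.invalid/", proxies=proxies or {})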
def on_bad_certificate(self, server, certificate):
if server in self.bad_certificate_servers:
return
self.bad_certificate_servers[server] = certificate
self.server_list_updated()
def remove_bad_certificate(self, server):
if server not in self.bad_certificate_servers:
return
del self.bad_certificate_servers[server]
self.server_list_updated()
def remove_pinned_certificate(self, server):
cert_file = self.bad_certificate_servers.get(server)
if not cert_file:
return False
try:
os.unlink(cert_file)
self.print_error("Removed pinned certificate:", cert_file)
except OSError as e:
self.print_error("Could not remove pinned certificate:", cert_file, repr(e))
if os.path.exists(cert_file):
# Don't remove from bad certificate list if we failed to unpin
return False
self.remove_bad_certificate(server)
return True
def server_is_bad_certificate(self, server): return server in self.bad_certificate_servers
def server_set_blacklisted(self, server, b, save=True, skip_connection_logic=False):
assert isinstance(server, str)
if b:
self.blacklisted_servers |= {server}
else:
self.blacklisted_servers -= {server}
self.config.set_key("server_blacklist", list(self.blacklisted_servers), save)
if b and not skip_connection_logic and server in self.interfaces:
self.connection_down(server, False) # if blacklisting, this disconnects (if we were connected)
def server_is_blacklisted(self, server): return server in self.blacklisted_servers
def server_set_whitelisted(self, server, b, save=True):
assert isinstance(server, str)
adds = set(self.config.get('server_whitelist_added', []))
rems = set(self.config.get('server_whitelist_removed', []))
is_hardcoded = server in self._hardcoded_whitelist
s = {server} # make a set so |= and -= work
len0 = len(self.whitelisted_servers)
if b:
# the below logic keeps the adds list from containing redundant 'whitelisted' servers that are already defined in servers.json
# it also makes it so that if the developers remove a server from servers.json, it goes away from the whitelist automatically.
if is_hardcoded:
adds -= s # it's in the hardcoded list anyway, remove it from adds to keep adds from being redundant
else:
adds |= s # it's not a hardcoded server, add it to 'adds'
rems -= s
self.whitelisted_servers |= s
else:
adds -= s
if is_hardcoded:
rems |= s # it's in the hardcoded set, so it needs to explicitly be added to the 'rems' set to be taken out of the dynamically computed whitelist (_compute_whitelist())
else:
rems -= s # it's not in the hardcoded list, so no need to add it to the rems as it will be not whitelisted on next run since it's gone from 'adds'
self.whitelisted_servers -= s
if len0 != len(self.whitelisted_servers):
# it changed. So re-cache hostmap which we use as an argument to pick_random_server() elsewhere in this class
self.whitelisted_servers_hostmap = servers_to_hostmap(self.whitelisted_servers)
self.config.set_key('server_whitelist_added', list(adds), save)
self.config.set_key('server_whitelist_removed', list(rems), save)
def server_is_whitelisted(self, server): return server in self.whitelisted_servers
def _compute_whitelist(self):
if not hasattr(self, '_hardcoded_whitelist'):
self._hardcoded_whitelist = frozenset(hostmap_to_servers(networks.net.DEFAULT_SERVERS))
ret = set(self._hardcoded_whitelist)
ret |= set(self.config.get('server_whitelist_added', [])) # this key is all the servers that weren't in the hardcoded whitelist that the user explicitly added
ret -= set(self.config.get('server_whitelist_removed', [])) # this key is all the servers that were hardcoded in the whitelist that the user explicitly removed
return ret, servers_to_hostmap(ret)
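    # Worked example of the add/remove bookkeeping above (server names hypothetical):
    #   hardcoded whitelist = {A, B}; the user whitelists C and un-whitelists B, so
    #   'server_whitelist_added'   -> [C]
    #   'server_whitelist_removed' -> [B]
    #   effective whitelist        -> {A, C}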
def is_whitelist_only(self):
return bool(self.config.get('whitelist_servers_only', DEFAULT_WHITELIST_SERVERS_ONLY))
def set_whitelist_only(self, b):
if bool(b) == self.is_whitelist_only():
return # disallow redundant/noop calls
self.config.set_key('whitelist_servers_only', b, True)
if b:
with self.interface_lock:
# now, disconnect from all non-whitelisted servers
for s in self.interfaces.copy():
if s not in self.whitelisted_servers:
self.connection_down(s)
| 48.265226
| 210
| 0.629086
|
4a1925abf207946c8961a7053be98febf5ab4e6b
| 1,244
|
py
|
Python
|
apprtc/fabfile.py
|
walterfan/webrtc_snippets
|
6964560a91ad3338fe8c50a9a250d0a0978eb42a
|
[
"Apache-2.0"
] | null | null | null |
apprtc/fabfile.py
|
walterfan/webrtc_snippets
|
6964560a91ad3338fe8c50a9a250d0a0978eb42a
|
[
"Apache-2.0"
] | null | null | null |
apprtc/fabfile.py
|
walterfan/webrtc_snippets
|
6964560a91ad3338fe8c50a9a250d0a0978eb42a
|
[
"Apache-2.0"
] | null | null | null |
from asyncio import tasks
from fabric.api import *
from fabric.context_managers import *
from fabric.contrib.console import confirm
from datetime import date
from sys import platform
import os, subprocess
import psutil
import platform
BASE_PATH = os.path.dirname(__file__)
appserver_ports = {}
"""
Linux: Linux
Mac: Darwin
Windows: Windows
"""
@task
def test():
os_type = platform.system()
print(os_type)
@task
def start_server(local_ip=None):
if not local_ip:
cmd0 = "ifconfig en0 | grep inet | awk '$1==\"inet\" {print $2}'"
stdoutput = subprocess.check_output(cmd0, shell=True)
local_ip = stdoutput.decode('utf-8')
print(local_ip)
os_type = platform.system()
if os_type == 'Darwin':
cmd1 = """
docker run --rm \
-p 8080:8080 -p 8089:8089 -p 3478:3478 -p 3478:3478/udp -p 3033:3033 \
-p 59000-65000:59000-65000/udp \
-e PUBLIC_IP=%s \
-it piasy/apprtc-server
"""
local(cmd1.strip() % local_ip.strip())
elif os_type == 'Linux':
cmd1 = "docker run --rm --net=host \
-e PUBLIC_IP=%s \
-it piasy/apprtc-server"
print(cmd1.strip() % local_ip.strip())
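# Typical invocations with the Fabric 1.x CLI (an assumption based on the
# fabric.api import style used above):
#   fab test
#   fab start_server                         # detects the local IP via ifconfig en0
#   fab start_server:local_ip=192.168.1.10   # pass the IP explicitly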
| 24.392157
| 82
| 0.610129
|
4a19261fa844d1ce31be1c9d05b03407baed57c7
| 8,552
|
py
|
Python
|
pygeotoolbox/workflow/rest/workflows.py
|
raugustyn/doctest
|
c37b0e2fa11ebd30d600923020b1ce44145a0250
|
[
"MIT"
] | null | null | null |
pygeotoolbox/workflow/rest/workflows.py
|
raugustyn/doctest
|
c37b0e2fa11ebd30d600923020b1ce44145a0250
|
[
"MIT"
] | null | null | null |
pygeotoolbox/workflow/rest/workflows.py
|
raugustyn/doctest
|
c37b0e2fa11ebd30d600923020b1ce44145a0250
|
[
"MIT"
] | null | null | null |
#!c:/python27/python.exe
# -*- coding: utf-8 -*-
from workflow.workflowsequence import WorkflowSequence
__version__ = 0.1
import sys, os, urllib, codecs
import web # http://webpy.org/cookbook/
import sharedtools
TEMPLATE_RequestNotHandled = """
<p style="text-align:left">
<table>
<tr><td><a href="Workflows">Workflows</a></td><td>Registered workflows</td></tr>
</table>
<hr>
<h4>Request Details</h4>
<p>Path: #PATH#</p>
<table>
#QUERY_LINES#
</table>
</p>
"""
import sharedtools.log as log
if __name__ == "__main__":
logger = log.createLogger("workflows")
from workflow import getRegisteredWorkflows, WorkflowItem
from workflow import buildHTMLPageContent
from workflow.rest import buildFloatingListPage
import sharedtools.config as config
workflowItems = None
defaultPath = "workflows"
__paths = { }
def registerPath(path, requestProcessor):
global __paths
path = path.lower()
__paths[path] = requestProcessor
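# Illustrative custom handler registration (names are hypothetical). A processor
# receives the remaining path parts, the query dict and the HTTPResponse, and must
# return the (possibly handled) response object:
#   def pingProcessor(pathList, queryParams, response):
#       response.handled = True
#       response.htmlData = "pong"
#       return response
#   registerPath("ping", pingProcessor)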
class HTTPResponse():
def __init__(self, handled, mimeFormat = "text/html", htmlData = ""):
self.handled = handled
self.mimeFormat = mimeFormat
self.htmlData = htmlData
urls = ('/(.*)', 'handler', 'createsituation')
def ProcessRequest(fullPathList, queryParams, response):
global _registeredRunObjects, workflowItems
if fullPathList:
mainPath = fullPathList[0].lower()
if mainPath == "workflows":
if len(fullPathList) == 1:
if len(workflowItems) == 0:
html = buildHTMLPageContent("Registrovaná workflow", "Žádná workflow nejsou registrována")
else:
content = []
for workflowItem in workflowItems:
content.append({
"title" : workflowItem.caption,
"shortdesc" : workflowItem.getShortDescription(),
"link": 'workflows/' + workflowItem.id
})
html = buildFloatingListPage("Registrovaná workflow", "", content)
else:
workflowItem = WorkflowItem.getItemById(fullPathList[1])
if workflowItem:
if len(fullPathList) == 3 and fullPathList[2].lower() == "execute":
startNumberOfLines = log.logger.getNumberOfLines()
workflowItem.execute()
lines = log.logger.getLines(startNumberOfLines-1, log.logger.getNumberOfLines()-1)
html = lines;
elif len(fullPathList) == 3 and fullPathList[2].lower() == "validate":
isValid, message = workflowItem.validate()
html = "%s:%s" % (str(isValid), message)
else:
html = workflowItem.getHTML()
else:
html = buildHTMLPageContent("Requested Workflow", "%s not found!!!" % fullPathList[1])
elif mainPath == "config":
content = []
for info in config.valueInfos.values():
content.append({
"title": info.caption,
"shortdesc" : info.getFullDescription(),
"link": ""
})
html = buildFloatingListPage("Konfigurace", "", content)
html = html.replace("height: 140px;", "height: 260px;")
elif mainPath == "console":
html = log.logger.getLines(0, None, "\n")
response.mimeFormat = "text/plain"
elif mainPath == "statusinfo":
html = sharedtools.getStatusInfo()
response.mimeFormat = "text/plain"
elif mainPath == "values":
if len(fullPathList) == 3 and fullPathList[2].lower() == "set":
valueName = fullPathList[1]
value = urllib.unquote(queryParams["newValue"])
html = "%s=%s" % (valueName, value)
config.setValue(valueName, value)
config.save()
elif mainPath in __paths:
response = __paths[mainPath](fullPathList[1:], queryParams, response)
else:
html = "Empty"
if not response.handled:
response.handled = True
response.htmlData = html
if not response.handled:
response.handled = True
queryLines = ""
for key, value in queryParams.iteritems():
queryLines += "%s %s" % (key, value)
if not queryLines:
queryLines = "No query params provided."
        dynamicContent = TEMPLATE_RequestNotHandled
dynamicContent = dynamicContent.replace("#QUERY_LINES#", queryLines)
page = "/".join(fullPathList)
if not page:
page = "No script path provided in query."
dynamicContent = dynamicContent.replace("#PATH#", page)
html = buildHTMLPageContent("Workflow Manager", dynamicContent)
response.htmlData = html
return response
class WorkflowsApplication(web.application):
def run(self, port, *middleware):
func = self.wsgifunc(*middleware)
return web.httpserver.runsimple(func, ("localhost", port))
class handler:
def doProcessRequest(self, page):
web.header('Access-Control-Allow-Origin', '*')
web.header('Access-Control-Allow-Credentials', 'true')
if not page and defaultPath:
web.header("Content-Type", "text/html;charset=utf-8")
web.redirect(defaultPath)
return None
else:
response = ProcessRequest(page.split("/"), web.input(), HTTPResponse(False))
web.header("Content-Type", response.mimeFormat + ";charset=utf-8")
return response.htmlData
def GET(self, page):
return self.doProcessRequest(page)
def POST(self, page):
return self.doProcessRequest(page)
def runServer(loggerName="RunWorkflowServer", port=65423, registeredWorkflows = None, aDefaultPath = None):
global workflowItems, defaultPath
if loggerName:
import sharedtools.log as log
logger = log.createLogger(loggerName)
logger.debug("Configuration file:%s" % config.configFileName)
if registeredWorkflows:
if isinstance(registeredWorkflows, list):
workflowItems = registeredWorkflows
elif isinstance(registeredWorkflows, WorkflowItem) or isinstance(registeredWorkflows, WorkflowSequence):
workflowItems = [registeredWorkflows]
defaultPath = "workflows/" + registeredWorkflows.id
else:
workflowItems = getRegisteredWorkflows()
if aDefaultPath:
defaultPath = aDefaultPath
import os
if os.environ.has_key('SERVER_SOFTWARE'):
import cgi
import cgitb
cgitb.enable()
form = cgi.FieldStorage()
if os.environ.has_key('PATH_INFO'):
pathInfo = os.environ['PATH_INFO']
else:
pathInfo = ""
if pathInfo[:1] == "/": pathInfo = pathInfo[1:]
fullPathList = pathInfo.replace("//", "/")
        fullPathList = fullPathList.split("/") # REST parameters
query = {}
queryList = form.list
for item in queryList:
decodedValue = urllib.unquote(item.value)
try:
decodedValue = unicode(decodedValue, "utf-8")
except:
decodedValue = codecs.decode(decodedValue, "latin-1")
decodedValue = urllib.unquote(decodedValue)
query[item.name] = decodedValue
response = ProcessRequest(fullPathList, query, HTTPResponse(False))
if response.mimeFormat in ["text/html", "text/javascript", "text/plain"]:
print "Content-Type: " + response.mimeFormat + ";charset=utf-8" # HTML is following
print # blank line, end of headers
sys.stdout.write(response.htmlData.encode('utf-8'))
else:
print "Content-Type: " + "application/octet-stream" # response.mimeFormat
print # blank line, end of headers
if sys.platform != "win32":
sys.stdout.write(response.htmlData)
sys.stdout.flush()
else:
import os, msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
sys.stdout.write(response.htmlData)
sys.stdout.flush()
msvcrt.setmode(sys.stdout.fileno(), os.O_TEXT)
else:
app = WorkflowsApplication(urls, globals())
app.run(port=port)
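# Illustrative startup (the workflow object is hypothetical):
#   runServer("MyWorkflows", port=65423, registeredWorkflows=my_workflow_item)
# When invoked under CGI (SERVER_SOFTWARE is set) the module answers the single
# request instead of starting the embedded web.py server.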
| 35.193416
| 112
| 0.588167
|
4a192638d3b16f8740d341c8bc848e6e2d253a9f
| 4,701
|
py
|
Python
|
library_samples/Python3/ocs_sample_library_preview/BaseClient.py
|
osi-awoodall/OSI-Samples-OCS
|
1995ccda20e4fe2ae66f3b67afbc1127d638a6fc
|
[
"Apache-2.0"
] | null | null | null |
library_samples/Python3/ocs_sample_library_preview/BaseClient.py
|
osi-awoodall/OSI-Samples-OCS
|
1995ccda20e4fe2ae66f3b67afbc1127d638a6fc
|
[
"Apache-2.0"
] | null | null | null |
library_samples/Python3/ocs_sample_library_preview/BaseClient.py
|
osi-awoodall/OSI-Samples-OCS
|
1995ccda20e4fe2ae66f3b67afbc1127d638a6fc
|
[
"Apache-2.0"
] | null | null | null |
# BaseClient.py
#
import json
from .SdsError import SdsError
import requests
import time
class BaseClient(object):
"""Handles communication with Sds Service. Internal Use"""
def __init__(self, apiversion, tenant, url, clientId, clientSecret,
acceptVerbosity=False):
self.__apiversion = apiversion
self.__tenant = tenant
self.__clientId = clientId
self.__clientSecret = clientSecret
self.__url = url # if resource.endswith("/") else resource + "/"
self.__token = ""
self.__expiration = 0
self.__getToken()
self.__acceptVerbosity = acceptVerbosity
self.__requestTimeout = None
self.__uri_API = url + '/api/' + apiversion
@property
def uri(self):
"""
Gets the base url
:return:
"""
return self.__url
@property
def uri_API(self):
"""
Returns the base URL plus api versioning information
:return:
"""
return self.__uri_API
@property
def api_version(self):
"""
Returns just the base api versioning information
:return:
"""
return self.__apiversion
@property
def tenant(self):
"""
Returns the tenant ID
:return:
"""
return self.__tenant
@property
def AcceptVerbosity(self):
return self.__acceptVerbosity
@AcceptVerbosity.setter
def AcceptVerbosity(self, accept_verbosity):
self.__acceptVerbosity = accept_verbosity
@property
def RequestTimeout(self):
return self.__requestTimeout
@RequestTimeout.setter
def RequestTimeout(self, timeout):
self.__requestTimeout = timeout
def __getToken(self):
"""
Gets the bearer token
:return:
"""
if ((self.__expiration - time.time()) > 5 * 60):
return self.__token
discoveryUrl = requests.get(
self.__url + "/identity/.well-known/openid-configuration",
headers={"Accept": "application/json"})
if discoveryUrl.status_code < 200 or discoveryUrl.status_code >= 300:
discoveryUrl.close()
status = discoveryUrl.status_code
reason = discoveryUrl.text
raise SdsError(f"Failed to get access token endpoint "
f"from discovery URL: {status}:{reason}")
tokenEndpoint = json.loads(discoveryUrl.content)["token_endpoint"]
tokenInformation = requests.post(
tokenEndpoint,
data={"client_id": self.__clientId,
"client_secret": self.__clientSecret,
"grant_type": "client_credentials"})
token = json.loads(tokenInformation.content)
expiration = token.get("expires_in", None)
if expiration is None:
raise SdsError(f"Failed to get token, check client id/secret: {token['error']}")
self.__expiration = float(expiration) + time.time()
self.__token = token['access_token']
return self.__token
def sdsHeaders(self):
"""
Gets the base headers needed for OCS call
:return:
"""
headers = {"Authorization": "Bearer %s" % self.__getToken(),
"Content-type": "application/json",
"Accept": "application/json"}
if (self.__acceptVerbosity):
headers['Accept-Verbosity'] = "verbose"
if self.__requestTimeout is not None:
headers['Request-Timeout'] = str(self.__requestTimeout)
return headers
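    # Illustrative use of the headers helper (endpoint, tenant and credentials are
    # placeholders):
    #   client = BaseClient("v1", "mytenant", "https://example.invalid",
    #                       "client-id", "client-secret")
    #   requests.get(client.uri_API + "/Tenants/mytenant/Namespaces",
    #                headers=client.sdsHeaders())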
def checkResponse(self, response, main_message):
if response.status_code < 200 or response.status_code >= 300:
status = response.status_code
reason = response.text
url = response.url
opId = response.headers["Operation-Id"]
error = f" {status}:{reason}. URL {url} OperationId {opId}"
response.close()
message = main_message + error
raise SdsError(message)
#this happens on a collection return that is partially successful
if response.status_code == 207:
status = response.status_code
            error = response.json()["Error"]
            reason = response.json()["Reason"]
            errors = str(response.json()["ChildErrors"])
url = response.url
opId = response.headers["Operation-Id"]
errorToWrite = f" {status}:{error}:{reason}. \n\n{errors}\n\n URL {url} OperationId {opId}"
response.close()
message = main_message + errorToWrite
raise SdsError(message)
| 30.134615
| 106
| 0.588598
|
4a192751f2597b6f48a108b1bcee8c03ee37d5a5
| 110
|
py
|
Python
|
test/celery_test_utils.py
|
lxkaka/celery-prometheus-exporter
|
967e8dc2320f4ce8be6e61d05ea2bf506a811140
|
[
"MIT"
] | null | null | null |
test/celery_test_utils.py
|
lxkaka/celery-prometheus-exporter
|
967e8dc2320f4ce8be6e61d05ea2bf506a811140
|
[
"MIT"
] | null | null | null |
test/celery_test_utils.py
|
lxkaka/celery-prometheus-exporter
|
967e8dc2320f4ce8be6e61d05ea2bf506a811140
|
[
"MIT"
] | null | null | null |
import celery
def get_celery_app():
return celery.Celery(broker='memory://', backend='cache+memory://')
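# Minimal illustrative use in a test (the task below is arbitrary):
#   app = get_celery_app()
#   @app.task
#   def add(x, y):
#       return x + y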
| 18.333333
| 71
| 0.7
|
4a19276f4faa66c88b6f3c4f13f6b034f81e63e0
| 80
|
py
|
Python
|
ex08a EMOJI.py
|
RODRIGOKTK/Python-exercicios
|
f7985f2c277aae8b158bdeea4f2493febaaf06c5
|
[
"Unlicense"
] | null | null | null |
ex08a EMOJI.py
|
RODRIGOKTK/Python-exercicios
|
f7985f2c277aae8b158bdeea4f2493febaaf06c5
|
[
"Unlicense"
] | null | null | null |
ex08a EMOJI.py
|
RODRIGOKTK/Python-exercicios
|
f7985f2c277aae8b158bdeea4f2493febaaf06c5
|
[
"Unlicense"
] | null | null | null |
import emoji
print(emoji.emojize('Olá mundo :sunglasses:', use_aliases=True))
| 26.666667
| 65
| 0.7625
|
4a192875b85bb9f574d68dd60e6cbc6e1b126483
| 6,257
|
py
|
Python
|
seqal/datasets.py
|
tech-sketch/SeqAL
|
05999f722438fbb418768393d8dc209a18383b6b
|
[
"MIT"
] | null | null | null |
seqal/datasets.py
|
tech-sketch/SeqAL
|
05999f722438fbb418768393d8dc209a18383b6b
|
[
"MIT"
] | 20
|
2022-01-13T05:14:51.000Z
|
2022-03-11T07:30:40.000Z
|
seqal/datasets.py
|
tech-sketch/SeqAL
|
05999f722438fbb418768393d8dc209a18383b6b
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from typing import Dict, List, Union
from flair.data import Corpus as ParentCorpus
from flair.data import Sentence
from flair.datasets import ColumnDataset as ParentColumnDataset
from flair.datasets.base import find_train_dev_test_files
from torch.utils.data import Dataset
from torch.utils.data.dataset import ConcatDataset
class Corpus(ParentCorpus):
"""The modified Corpus class.
Args:
ParentCorpus: The original Corpus class.
"""
def get_all_sentences(self) -> Dataset:
"""Refactor method of flair.data.corpus
Returns:
Dataset: flair dataset.
"""
parts = []
if self.train:
parts.append(self.train.sentences)
if self.dev:
parts.append(self.dev.sentences)
if self.test:
parts.append(self.test.sentences)
return ConcatDataset(parts)
def add_queried_samples(self, queried_samples: List[Sentence]) -> None:
"""Add queried data to labeled data.
Args:
queried_samples (List[Sentence]): Queried data.
"""
for sample in queried_samples:
self.train.sentences.append(sample)
class ColumnCorpus(Corpus):
def __init__(
self,
data_folder: Union[str, Path],
column_format: Dict[int, str],
train_file=None,
test_file=None,
dev_file=None,
tag_to_bioes=None,
column_delimiter: str = r"\s+",
comment_symbol: str = None,
encoding: str = "utf-8",
document_separator_token: str = None,
skip_first_line: bool = False,
in_memory: bool = True,
label_name_map: Dict[str, str] = None,
banned_sentences: List[str] = None,
autofind_splits: bool = True,
**corpusargs,
):
"""Instantiates a Corpus from CoNLL column-formatted task data such as CoNLL03 or CoNLL2000.
Args:
data_folder (Union[str, Path]): Base folder with the task data
column_format (Dict[int, str]): A map specifying the column format
train_file ([type], optional): The name of the train file
test_file ([type], optional): The name of the test file
dev_file ([type], optional): The name of the dev file, if None, dev data is sampled from train
tag_to_bioes ([type], optional): Whether to convert to BIOES tagging scheme
            column_delimiter (str, optional): Default is to split on any separator,
but you can overwrite for instance with "\t" to split only on tabs
comment_symbol (str, optional): If set, lines that begin with this symbol are treated as comments
encoding (str, optional): Encodings. Defaults to "utf-8".
document_separator_token (str, optional): If provided, sentences that function as document
boundaries are so marked
skip_first_line (bool, optional): Set to True if your dataset has a header line
in_memory (bool, optional): If set to True, the dataset is kept in memory as Sentence objects,
otherwise does disk reads
label_name_map (Dict[str, str], optional): Optionally map tag names to different schema.
banned_sentences (List[str], optional): Optionally remove sentences from the corpus.
Works only if `in_memory` is true
autofind_splits (bool, optional): Defaults to True.
Returns:
Dataset: a Corpus with annotated train, dev and test data
"""
# find train, dev and test files if not specified
dev_file, test_file, train_file = find_train_dev_test_files(
data_folder, dev_file, test_file, train_file, autofind_splits
)
# get train data
train = (
ColumnDataset(
train_file,
column_format,
tag_to_bioes,
encoding=encoding,
comment_symbol=comment_symbol,
column_delimiter=column_delimiter,
# banned_sentences=banned_sentences,
in_memory=in_memory,
document_separator_token=document_separator_token,
skip_first_line=skip_first_line,
label_name_map=label_name_map,
)
if train_file is not None
else None
)
# read in test file if exists
test = (
ColumnDataset(
test_file,
column_format,
tag_to_bioes,
encoding=encoding,
comment_symbol=comment_symbol,
column_delimiter=column_delimiter,
# banned_sentences=banned_sentences,
in_memory=in_memory,
document_separator_token=document_separator_token,
skip_first_line=skip_first_line,
label_name_map=label_name_map,
)
if test_file is not None
else None
)
# read in dev file if exists
dev = (
ColumnDataset(
dev_file,
column_format,
tag_to_bioes,
encoding=encoding,
comment_symbol=comment_symbol,
# banned_sentences=banned_sentences,
column_delimiter=column_delimiter,
in_memory=in_memory,
document_separator_token=document_separator_token,
skip_first_line=skip_first_line,
label_name_map=label_name_map,
)
if dev_file is not None
else None
)
super(ColumnCorpus, self).__init__(
train, dev, test, name=str(data_folder), **corpusargs
)
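    # Illustrative construction (folder, file names and column map are assumptions):
    #   columns = {0: "text", 1: "ner"}
    #   corpus = ColumnCorpus("data/conll_like", columns,
    #                         train_file="train.txt",
    #                         dev_file="dev.txt",
    #                         test_file="test.txt")
    #   print(len(corpus.train), len(corpus.dev), len(corpus.test))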
class ColumnDataset(ParentColumnDataset):
def __len__(self):
"""Override method"""
return len(self.sentences)
def obtain_statistics(self, name: str = "Pool", tag_type: str = None):
return Corpus._obtain_statistics_for(
self.sentences, name=name, tag_type=tag_type
)
| 37.467066
| 112
| 0.590858
|
4a1929283b90e99fe8fcadbf60bb6d8a151fad6c
| 486
|
py
|
Python
|
tests/conftest.py
|
ftbernales/groundmotion-processing
|
5be88da75e7168bd2421973d6f1e54a91c679dc8
|
[
"Unlicense"
] | null | null | null |
tests/conftest.py
|
ftbernales/groundmotion-processing
|
5be88da75e7168bd2421973d6f1e54a91c679dc8
|
[
"Unlicense"
] | null | null | null |
tests/conftest.py
|
ftbernales/groundmotion-processing
|
5be88da75e7168bd2421973d6f1e54a91c679dc8
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
#
# This is needed here so that the matplotlib backend gets
# set before any other imports of matplotlib
#
import matplotlib
matplotlib.use('Agg')
def pytest_configure(config):
#
# This tells get_config_paths() (shakemap.utils.config) to
# return paths into the testing part of the repo
#
os.environ['CALLED_FROM_PYTEST'] = 'True'
def pytest_unconfigure(config):
del os.environ['CALLED_FROM_PYTEST']
| 20.25
| 62
| 0.707819
|
4a19298fecc095356a3a0676b768ae3949d31293
| 3,479
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py
|
Sand3r-/Paddle
|
1217a521554d63caa1381b8716910d0268dfc22d
|
[
"Apache-2.0"
] | 1
|
2020-02-26T13:44:57.000Z
|
2020-02-26T13:44:57.000Z
|
python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py
|
Sand3r-/Paddle
|
1217a521554d63caa1381b8716910d0268dfc22d
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py
|
Sand3r-/Paddle
|
1217a521554d63caa1381b8716910d0268dfc22d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle.fluid as fluid
from op_test import OpTest
class TestFillConstantBatchSizeLikeWhenFirstDimIsBatchSize(OpTest):
def setUp(self):
self.op_type = "fill_constant_batch_size_like"
self.inputs = {'Input': np.random.random((219, 232)).astype("float32")}
self.attrs = {'value': 3.5, 'shape': [-1, 132, 7]}
out = np.random.random((219, 132, 7)).astype("float32")
out.fill(3.5)
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
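# In the test above the -1 entry in 'shape' marks the batch dimension: it is copied
# from dim 0 of 'Input' (219), so the output has shape (219, 132, 7) and is filled
# with 3.5. The classes below vary the dtype, use LoD input (where the batch size
# becomes the number of sequences) and exercise input_dim_idx/output_dim_idx.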
class TestFillConstantBatchSizeLikeWhenSecondDimIsBatchSize(OpTest):
def setUp(self):
self.op_type = "fill_constant_batch_size_like"
self.inputs = {'Input': np.random.random((219, 232)).astype("float32")}
self.attrs = {
'value': 3.5,
'shape': [132, -1, 7],
'input_dim_idx': 0,
'output_dim_idx': 1
}
out = np.random.random((132, 219, 7)).astype("float32")
out.fill(3.5)
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
class TestFillConstantBatchSizeLikeInt64(OpTest):
def setUp(self):
self.op_type = "fill_constant_batch_size_like"
self.inputs = {'Input': np.random.random((219, 232)).astype("int64")}
self.attrs = {'value': 5894589485094, 'shape': [-1, 132, 7]}
out = np.random.random((219, 132, 7)).astype("int64")
out.fill(5894589485094)
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
class TestFillConstantBatchSizeLikeWithLoDTensor(OpTest):
def setUp(self):
self.op_type = "fill_constant_batch_size_like"
self.inputs = {
'Input': (np.random.random((31, 28)).astype("float32"),
[[9, 14, 8]])
}
self.attrs = {
'value': 3.5,
'shape': [-1, 16],
'input_dim_idx': 0,
'output_dim_idx': 0
}
out = np.random.random((3, 16)).astype("float32")
out.fill(3.5)
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output()
# Test python API
class TestFillConstantBatchSizeLikeAPI(unittest.TestCase):
def test_api(self):
like = fluid.layers.fill_constant(
shape=[1, 200], value=10, dtype='int64')
out = fluid.layers.fill_constant_batch_size_like(
input=like, shape=[2, 300], value=1315454564656, dtype='int64')
exe = fluid.Executor(place=fluid.CPUPlace())
res, = exe.run(fluid.default_main_program(), fetch_list=[out])
assert np.array_equal(
res[0], np.full(
[300], 1315454564656, dtype="int64"))
if __name__ == "__main__":
unittest.main()
| 31.917431
| 79
| 0.626329
|
4a192a0a19d29cea9b57847ccd8d9060b304dcfe
| 22,367
|
py
|
Python
|
allauth/account/forms.py
|
iarp/django-allauth
|
116b7a89c37149aeab083d1542539e6c99ba91a7
|
[
"MIT"
] | null | null | null |
allauth/account/forms.py
|
iarp/django-allauth
|
116b7a89c37149aeab083d1542539e6c99ba91a7
|
[
"MIT"
] | null | null | null |
allauth/account/forms.py
|
iarp/django-allauth
|
116b7a89c37149aeab083d1542539e6c99ba91a7
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import warnings
from importlib import import_module
from django import forms
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.contrib.sites.shortcuts import get_current_site
from django.core import exceptions, validators
from django.urls import reverse
from django.utils.translation import gettext, gettext_lazy as _, pgettext
from ..utils import (
build_absolute_uri,
get_username_max_length,
set_form_field_order,
)
from . import app_settings
from .adapter import get_adapter
from .app_settings import AuthenticationMethod
from .models import EmailAddress
from .utils import (
filter_users_by_email,
get_user_model,
perform_login,
setup_user_email,
sync_user_email_addresses,
url_str_to_user_pk,
user_email,
user_pk_to_url_str,
user_username,
)
class EmailAwarePasswordResetTokenGenerator(PasswordResetTokenGenerator):
def _make_hash_value(self, user, timestamp):
ret = super(EmailAwarePasswordResetTokenGenerator, self)._make_hash_value(
user, timestamp
)
sync_user_email_addresses(user)
email = user_email(user)
emails = set([email] if email else [])
emails.update(
EmailAddress.objects.filter(user=user).values_list("email", flat=True)
)
ret += "|".join(sorted(emails))
return ret
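    # A consequence of folding the addresses into the hash: any change to the
    # user's e-mail addresses invalidates previously issued password-reset tokens.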
default_token_generator = EmailAwarePasswordResetTokenGenerator()
class PasswordVerificationMixin(object):
def clean(self):
cleaned_data = super(PasswordVerificationMixin, self).clean()
password1 = cleaned_data.get("password1")
password2 = cleaned_data.get("password2")
if (password1 and password2) and password1 != password2:
self.add_error("password2", _("You must type the same password each time."))
return cleaned_data
class PasswordField(forms.CharField):
def __init__(self, *args, **kwargs):
render_value = kwargs.pop(
"render_value", app_settings.PASSWORD_INPUT_RENDER_VALUE
)
kwargs["widget"] = forms.PasswordInput(
render_value=render_value,
attrs={"placeholder": kwargs.get("label")},
)
autocomplete = kwargs.pop("autocomplete", None)
if autocomplete is not None:
kwargs["widget"].attrs["autocomplete"] = autocomplete
super(PasswordField, self).__init__(*args, **kwargs)
class SetPasswordField(PasswordField):
def __init__(self, *args, **kwargs):
kwargs["autocomplete"] = "new-password"
super(SetPasswordField, self).__init__(*args, **kwargs)
self.user = None
def clean(self, value):
value = super(SetPasswordField, self).clean(value)
value = get_adapter().clean_password(value, user=self.user)
return value
class LoginForm(forms.Form):
password = PasswordField(label=_("Password"), autocomplete="current-password")
remember = forms.BooleanField(label=_("Remember Me"), required=False)
user = None
error_messages = {
"account_inactive": _("This account is currently inactive."),
"email_password_mismatch": _(
"The e-mail address and/or password you specified are not correct."
),
"username_password_mismatch": _(
"The username and/or password you specified are not correct."
),
}
def __init__(self, *args, **kwargs):
self.request = kwargs.pop("request", None)
super(LoginForm, self).__init__(*args, **kwargs)
if app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.EMAIL:
login_widget = forms.TextInput(
attrs={
"type": "email",
"placeholder": _("E-mail address"),
"autocomplete": "email",
}
)
login_field = forms.EmailField(label=_("E-mail"), widget=login_widget)
elif app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.USERNAME:
login_widget = forms.TextInput(
attrs={"placeholder": _("Username"), "autocomplete": "username"}
)
login_field = forms.CharField(
label=_("Username"),
widget=login_widget,
max_length=get_username_max_length(),
)
else:
assert (
app_settings.AUTHENTICATION_METHOD
== AuthenticationMethod.USERNAME_EMAIL
)
login_widget = forms.TextInput(
attrs={"placeholder": _("Username or e-mail"), "autocomplete": "email"}
)
login_field = forms.CharField(
label=pgettext("field label", "Login"), widget=login_widget
)
self.fields["login"] = login_field
set_form_field_order(self, ["login", "password", "remember"])
if app_settings.SESSION_REMEMBER is not None:
del self.fields["remember"]
def user_credentials(self):
"""
Provides the credentials required to authenticate the user for
login.
"""
credentials = {}
login = self.cleaned_data["login"]
if app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.EMAIL:
credentials["email"] = login
elif app_settings.AUTHENTICATION_METHOD == AuthenticationMethod.USERNAME:
credentials["username"] = login
else:
if self._is_login_email(login):
credentials["email"] = login
credentials["username"] = login
credentials["password"] = self.cleaned_data["password"]
return credentials
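    # For example, with AUTHENTICATION_METHOD == EMAIL and a login of
    # "alice@example.org", user_credentials() returns
    # {"email": "alice@example.org", "password": "<submitted password>"}.
    # With USERNAME_EMAIL, "username" is always set and "email" is added
    # when the login looks like an e-mail address, so either backend can match.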
def clean_login(self):
login = self.cleaned_data["login"]
return login.strip()
def _is_login_email(self, login):
try:
validators.validate_email(login)
ret = True
except exceptions.ValidationError:
ret = False
return ret
def clean(self):
super(LoginForm, self).clean()
if self._errors:
return
credentials = self.user_credentials()
user = get_adapter(self.request).authenticate(self.request, **credentials)
if user:
self.user = user
else:
auth_method = app_settings.AUTHENTICATION_METHOD
if auth_method == app_settings.AuthenticationMethod.USERNAME_EMAIL:
login = self.cleaned_data["login"]
if self._is_login_email(login):
auth_method = app_settings.AuthenticationMethod.EMAIL
else:
auth_method = app_settings.AuthenticationMethod.USERNAME
raise forms.ValidationError(
self.error_messages["%s_password_mismatch" % auth_method]
)
return self.cleaned_data
def login(self, request, redirect_url=None):
email = self.user_credentials().get("email")
ret = perform_login(
request,
self.user,
email_verification=app_settings.EMAIL_VERIFICATION,
redirect_url=redirect_url,
email=email,
)
remember = app_settings.SESSION_REMEMBER
if remember is None:
remember = self.cleaned_data["remember"]
if remember:
request.session.set_expiry(app_settings.SESSION_COOKIE_AGE)
else:
request.session.set_expiry(0)
return ret
class _DummyCustomSignupForm(forms.Form):
def signup(self, request, user):
"""
Invoked at signup time to complete the signup of the user.
"""
pass
def _base_signup_form_class():
"""
Currently, we inherit from the custom form, if any. This is all
not very elegant, though it serves a purpose:
- There are two signup forms: one for local accounts, and one for
social accounts
- Both share a common base (BaseSignupForm)
- Given the above, how to put in a custom signup form? Which form
would your custom form derive from, the local or the social one?
"""
if not app_settings.SIGNUP_FORM_CLASS:
return _DummyCustomSignupForm
try:
fc_module, fc_classname = app_settings.SIGNUP_FORM_CLASS.rsplit(".", 1)
except ValueError:
raise exceptions.ImproperlyConfigured(
"%s does not point to a form class" % app_settings.SIGNUP_FORM_CLASS
)
try:
mod = import_module(fc_module)
except ImportError as e:
raise exceptions.ImproperlyConfigured(
"Error importing form class %s:" ' "%s"' % (fc_module, e)
)
try:
fc_class = getattr(mod, fc_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured(
'Module "%s" does not define a' ' "%s" class' % (fc_module, fc_classname)
)
if not hasattr(fc_class, "signup"):
if hasattr(fc_class, "save"):
warnings.warn(
"The custom signup form must offer"
" a `def signup(self, request, user)` method",
DeprecationWarning,
)
else:
raise exceptions.ImproperlyConfigured(
'The custom signup form must implement a "signup" method'
)
return fc_class
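# A minimal sketch (not shipped with allauth) of the kind of class the
# SIGNUP_FORM_CLASS setting can point at: any form that offers the
# ``signup(self, request, user)`` hook described above. The "company" field
# below is purely illustrative.
class _ExampleCompanySignupForm(forms.Form):
    company = forms.CharField(max_length=100, required=False)

    def signup(self, request, user):
        # Invoked by allauth after the ``user`` instance has been created;
        # persist any extra data collected at signup here (e.g. on a profile).
        user.save()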
class BaseSignupForm(_base_signup_form_class()):
username = forms.CharField(
label=_("Username"),
min_length=app_settings.USERNAME_MIN_LENGTH,
widget=forms.TextInput(
attrs={"placeholder": _("Username"), "autocomplete": "username"}
),
)
email = forms.EmailField(
widget=forms.TextInput(
attrs={
"type": "email",
"placeholder": _("E-mail address"),
"autocomplete": "email",
}
)
)
def __init__(self, *args, **kwargs):
email_required = kwargs.pop("email_required", app_settings.EMAIL_REQUIRED)
self.username_required = kwargs.pop(
"username_required", app_settings.USERNAME_REQUIRED
)
super(BaseSignupForm, self).__init__(*args, **kwargs)
username_field = self.fields["username"]
username_field.max_length = get_username_max_length()
username_field.validators.append(
validators.MaxLengthValidator(username_field.max_length)
)
username_field.widget.attrs["maxlength"] = str(username_field.max_length)
default_field_order = [
"email",
"email2", # ignored when not present
"username",
"password1",
"password2", # ignored when not present
]
if app_settings.SIGNUP_EMAIL_ENTER_TWICE:
self.fields["email2"] = forms.EmailField(
label=_("E-mail (again)"),
widget=forms.TextInput(
attrs={
"type": "email",
"placeholder": _("E-mail address confirmation"),
}
),
)
if email_required:
self.fields["email"].label = gettext("E-mail")
self.fields["email"].required = True
else:
self.fields["email"].label = gettext("E-mail (optional)")
self.fields["email"].required = False
self.fields["email"].widget.is_required = False
if self.username_required:
default_field_order = [
"username",
"email",
"email2", # ignored when not present
"password1",
"password2", # ignored when not present
]
if not self.username_required:
del self.fields["username"]
set_form_field_order(
self, getattr(self, "field_order", None) or default_field_order
)
def clean_username(self):
value = self.cleaned_data["username"]
value = get_adapter().clean_username(value)
return value
def clean_email(self):
value = self.cleaned_data["email"]
value = get_adapter().clean_email(value)
if value and app_settings.UNIQUE_EMAIL:
value = self.validate_unique_email(value)
return value
def validate_unique_email(self, value):
return get_adapter().validate_unique_email(value)
def clean(self):
cleaned_data = super(BaseSignupForm, self).clean()
if app_settings.SIGNUP_EMAIL_ENTER_TWICE:
email = cleaned_data.get("email")
email2 = cleaned_data.get("email2")
if (email and email2) and email != email2:
self.add_error("email2", _("You must type the same email each time."))
return cleaned_data
def custom_signup(self, request, user):
custom_form = super(BaseSignupForm, self)
if hasattr(custom_form, "signup") and callable(custom_form.signup):
custom_form.signup(request, user)
else:
warnings.warn(
"The custom signup form must offer"
" a `def signup(self, request, user)` method",
DeprecationWarning,
)
# Historically, it was called .save, but this is confusing
# in case of ModelForm
custom_form.save(user)
class SignupForm(BaseSignupForm):
def __init__(self, *args, **kwargs):
super(SignupForm, self).__init__(*args, **kwargs)
self.fields["password1"] = PasswordField(
label=_("Password"), autocomplete="new-password"
)
if app_settings.SIGNUP_PASSWORD_ENTER_TWICE:
self.fields["password2"] = PasswordField(label=_("Password (again)"))
if hasattr(self, "field_order"):
set_form_field_order(self, self.field_order)
def clean(self):
super(SignupForm, self).clean()
# `password` cannot be of type `SetPasswordField`, as we don't
# have a `User` yet. So, let's populate a dummy user to be used
        # for password validation.
dummy_user = get_user_model()
user_username(dummy_user, self.cleaned_data.get("username"))
user_email(dummy_user, self.cleaned_data.get("email"))
password = self.cleaned_data.get("password1")
if password:
try:
get_adapter().clean_password(password, user=dummy_user)
except forms.ValidationError as e:
self.add_error("password1", e)
if (
app_settings.SIGNUP_PASSWORD_ENTER_TWICE
and "password1" in self.cleaned_data
and "password2" in self.cleaned_data
):
if self.cleaned_data["password1"] != self.cleaned_data["password2"]:
self.add_error(
"password2",
_("You must type the same password each time."),
)
return self.cleaned_data
def save(self, request):
adapter = get_adapter(request)
user = adapter.new_user(request)
adapter.save_user(request, user, self)
self.custom_signup(request, user)
# TODO: Move into adapter `save_user` ?
setup_user_email(request, user, [])
return user
class UserForm(forms.Form):
def __init__(self, user=None, *args, **kwargs):
self.user = user
super(UserForm, self).__init__(*args, **kwargs)
class AddEmailForm(UserForm):
email = forms.EmailField(
label=_("E-mail"),
required=True,
widget=forms.TextInput(
attrs={"type": "email", "placeholder": _("E-mail address")}
),
)
def clean_email(self):
value = self.cleaned_data["email"]
value = get_adapter().clean_email(value)
errors = {
"this_account": _(
"This e-mail address is already associated with this account."
),
"different_account": _(
"This e-mail address is already associated with another account."
),
"max_email_addresses": _("You cannot add more than %d e-mail addresses."),
}
users = filter_users_by_email(value)
on_this_account = [u for u in users if u.pk == self.user.pk]
on_diff_account = [u for u in users if u.pk != self.user.pk]
if on_this_account:
raise forms.ValidationError(errors["this_account"])
if on_diff_account and app_settings.UNIQUE_EMAIL:
raise forms.ValidationError(errors["different_account"])
if not EmailAddress.objects.can_add_email(self.user):
raise forms.ValidationError(
errors["max_email_addresses"] % app_settings.MAX_EMAIL_ADDRESSES
)
return value
def save(self, request):
return EmailAddress.objects.add_email(
request, self.user, self.cleaned_data["email"], confirm=True
)
class ChangePasswordForm(PasswordVerificationMixin, UserForm):
oldpassword = PasswordField(
label=_("Current Password"), autocomplete="current-password"
)
password1 = SetPasswordField(label=_("New Password"))
password2 = PasswordField(label=_("New Password (again)"))
def __init__(self, *args, **kwargs):
super(ChangePasswordForm, self).__init__(*args, **kwargs)
self.fields["password1"].user = self.user
def clean_oldpassword(self):
if not self.user.check_password(self.cleaned_data.get("oldpassword")):
raise forms.ValidationError(_("Please type your current password."))
return self.cleaned_data["oldpassword"]
def save(self):
get_adapter().set_password(self.user, self.cleaned_data["password1"])
class SetPasswordForm(PasswordVerificationMixin, UserForm):
password1 = SetPasswordField(label=_("Password"))
password2 = PasswordField(label=_("Password (again)"))
def __init__(self, *args, **kwargs):
super(SetPasswordForm, self).__init__(*args, **kwargs)
self.fields["password1"].user = self.user
def save(self):
get_adapter().set_password(self.user, self.cleaned_data["password1"])
class ResetPasswordForm(forms.Form):
email = forms.EmailField(
label=_("E-mail"),
required=True,
widget=forms.TextInput(
attrs={
"type": "email",
"placeholder": _("E-mail address"),
"autocomplete": "email",
}
),
)
def clean_email(self):
email = self.cleaned_data["email"]
email = get_adapter().clean_email(email)
self.users = filter_users_by_email(email, is_active=True)
if not self.users and not app_settings.PREVENT_ENUMERATION:
raise forms.ValidationError(
_("The e-mail address is not assigned to any user account")
)
return self.cleaned_data["email"]
def save(self, request, **kwargs):
email = self.cleaned_data["email"]
if not self.users:
self._send_unknown_account_mail(request, email)
else:
self._send_password_reset_mail(request, email, self.users, **kwargs)
return email
def _send_unknown_account_mail(self, request, email):
signup_url = build_absolute_uri(request, reverse("account_signup"))
context = {
"current_site": get_current_site(request),
"email": email,
"request": request,
"signup_url": signup_url,
}
get_adapter(request).send_mail("account/email/unknown_account", email, context)
def _send_password_reset_mail(self, request, email, users, **kwargs):
token_generator = kwargs.get("token_generator", default_token_generator)
for user in users:
temp_key = token_generator.make_token(user)
# save it to the password reset model
# password_reset = PasswordReset(user=user, temp_key=temp_key)
# password_reset.save()
# send the password reset email
path = reverse(
"account_reset_password_from_key",
kwargs=dict(uidb36=user_pk_to_url_str(user), key=temp_key),
)
url = build_absolute_uri(request, path)
context = {
"current_site": get_current_site(request),
"user": user,
"password_reset_url": url,
"request": request,
}
if app_settings.AUTHENTICATION_METHOD != AuthenticationMethod.EMAIL:
context["username"] = user_username(user)
get_adapter(request).send_mail(
"account/email/password_reset_key", email, context
)
class ResetPasswordKeyForm(PasswordVerificationMixin, forms.Form):
password1 = SetPasswordField(label=_("New Password"))
password2 = PasswordField(label=_("New Password (again)"))
def __init__(self, *args, **kwargs):
self.user = kwargs.pop("user", None)
self.temp_key = kwargs.pop("temp_key", None)
super(ResetPasswordKeyForm, self).__init__(*args, **kwargs)
self.fields["password1"].user = self.user
def save(self):
get_adapter().set_password(self.user, self.cleaned_data["password1"])
class UserTokenForm(forms.Form):
uidb36 = forms.CharField()
key = forms.CharField()
reset_user = None
token_generator = default_token_generator
error_messages = {
"token_invalid": _("The password reset token was invalid."),
}
def _get_user(self, uidb36):
User = get_user_model()
try:
pk = url_str_to_user_pk(uidb36)
return User.objects.get(pk=pk)
except (ValueError, User.DoesNotExist):
return None
def clean(self):
cleaned_data = super(UserTokenForm, self).clean()
uidb36 = cleaned_data.get("uidb36", None)
key = cleaned_data.get("key", None)
if not key:
raise forms.ValidationError(self.error_messages["token_invalid"])
self.reset_user = self._get_user(uidb36)
if self.reset_user is None or not self.token_generator.check_token(
self.reset_user, key
):
raise forms.ValidationError(self.error_messages["token_invalid"])
return cleaned_data
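# A minimal sketch (not part of allauth) of how the form above is fed from a
# password-reset URL: the uidb36/key pair generated for a user must round-trip
# through clean(). Assumes a saved ``user`` and a configured database.
def _example_user_token_form_roundtrip(user):
    data = {
        "uidb36": user_pk_to_url_str(user),
        "key": default_token_generator.make_token(user),
    }
    form = UserTokenForm(data=data)
    # is_valid() runs clean(), which looks the user up and checks the token.
    return form.is_valid() and form.reset_user == user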
| 35.27918 | 88 | 0.611705 |

4a192a224a42a622ff9cba7d41e4c09c8b5b683b | 12,874 | py | Python | sdk/python/pulumi_command/local/command.py | pulumi/pulumi-command | 0f04bbaaaf0d69d959389139fc2e60cefdb8b9c0 | ["Apache-2.0"] | 13 | 2021-12-30T10:08:57.000Z | 2022-03-20T18:51:01.000Z | sdk/python/pulumi_command/local/command.py | pulumi/pulumi-command | 0f04bbaaaf0d69d959389139fc2e60cefdb8b9c0 | ["Apache-2.0"] | 33 | 2021-12-15T10:14:25.000Z | 2022-03-28T23:53:28.000Z | sdk/python/pulumi_command/local/command.py | pulumi/pulumi-command | 0f04bbaaaf0d69d959389139fc2e60cefdb8b9c0 | ["Apache-2.0"] | 9 | 2022-01-10T12:05:16.000Z | 2022-03-14T21:40:17.000Z |
# coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['CommandArgs', 'Command']
@pulumi.input_type
class CommandArgs:
def __init__(__self__, *,
create: Optional[pulumi.Input[str]] = None,
delete: Optional[pulumi.Input[str]] = None,
dir: Optional[pulumi.Input[str]] = None,
environment: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
interpreter: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
stdin: Optional[pulumi.Input[str]] = None,
triggers: Optional[pulumi.Input[Sequence[Any]]] = None):
"""
The set of arguments for constructing a Command resource.
:param pulumi.Input[str] create: The command to run on create.
:param pulumi.Input[str] delete: The command to run on delete.
        :param pulumi.Input[str] dir: The working directory in which to run the command.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] environment: Additional environment variables available to the command's process.
:param pulumi.Input[Sequence[pulumi.Input[str]]] interpreter: The program and arguments to run the command.
On Linux and macOS, defaults to: `["/bin/sh", "-c"]`. On Windows, defaults to: `["cmd", "/C"]`
        :param pulumi.Input[str] stdin: Pass a string to the command's process as standard in.
        :param pulumi.Input[Sequence[Any]] triggers: Trigger replacements on changes to this input.
        """
if create is not None:
pulumi.set(__self__, "create", create)
if delete is not None:
pulumi.set(__self__, "delete", delete)
if dir is not None:
pulumi.set(__self__, "dir", dir)
if environment is not None:
pulumi.set(__self__, "environment", environment)
if interpreter is not None:
pulumi.set(__self__, "interpreter", interpreter)
if stdin is not None:
pulumi.set(__self__, "stdin", stdin)
if triggers is not None:
pulumi.set(__self__, "triggers", triggers)
@property
@pulumi.getter
def create(self) -> Optional[pulumi.Input[str]]:
"""
The command to run on create.
"""
return pulumi.get(self, "create")
@create.setter
def create(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "create", value)
@property
@pulumi.getter
def delete(self) -> Optional[pulumi.Input[str]]:
"""
The command to run on delete.
"""
return pulumi.get(self, "delete")
@delete.setter
def delete(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "delete", value)
@property
@pulumi.getter
def dir(self) -> Optional[pulumi.Input[str]]:
"""
        The working directory in which to run the command.
"""
return pulumi.get(self, "dir")
@dir.setter
def dir(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dir", value)
@property
@pulumi.getter
def environment(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Additional environment variables available to the command's process.
"""
return pulumi.get(self, "environment")
@environment.setter
def environment(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "environment", value)
@property
@pulumi.getter
def interpreter(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The program and arguments to run the command.
On Linux and macOS, defaults to: `["/bin/sh", "-c"]`. On Windows, defaults to: `["cmd", "/C"]`
"""
return pulumi.get(self, "interpreter")
@interpreter.setter
def interpreter(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "interpreter", value)
@property
@pulumi.getter
def stdin(self) -> Optional[pulumi.Input[str]]:
"""
Pass a string to the command's process as standard in
"""
return pulumi.get(self, "stdin")
@stdin.setter
def stdin(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stdin", value)
@property
@pulumi.getter
def triggers(self) -> Optional[pulumi.Input[Sequence[Any]]]:
return pulumi.get(self, "triggers")
@triggers.setter
def triggers(self, value: Optional[pulumi.Input[Sequence[Any]]]):
pulumi.set(self, "triggers", value)
class Command(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
create: Optional[pulumi.Input[str]] = None,
delete: Optional[pulumi.Input[str]] = None,
dir: Optional[pulumi.Input[str]] = None,
environment: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
interpreter: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
stdin: Optional[pulumi.Input[str]] = None,
triggers: Optional[pulumi.Input[Sequence[Any]]] = None,
__props__=None):
"""
A local command to be executed.
This command can be inserted into the life cycles of other resources using the
`dependsOn` or `parent` resource options. A command is considered to have
        failed when it finishes with a non-zero exit code. This will fail the CRUD step
of the `Command` resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] create: The command to run on create.
:param pulumi.Input[str] delete: The command to run on delete.
        :param pulumi.Input[str] dir: The working directory in which to run the command.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] environment: Additional environment variables available to the command's process.
:param pulumi.Input[Sequence[pulumi.Input[str]]] interpreter: The program and arguments to run the command.
On Linux and macOS, defaults to: `["/bin/sh", "-c"]`. On Windows, defaults to: `["cmd", "/C"]`
        :param pulumi.Input[str] stdin: Pass a string to the command's process as standard in.
        :param pulumi.Input[Sequence[Any]] triggers: Trigger replacements on changes to this input.
        """
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[CommandArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
A local command to be executed.
This command can be inserted into the life cycles of other resources using the
`dependsOn` or `parent` resource options. A command is considered to have
        failed when it finishes with a non-zero exit code. This will fail the CRUD step
of the `Command` resource.
:param str resource_name: The name of the resource.
:param CommandArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(CommandArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
create: Optional[pulumi.Input[str]] = None,
delete: Optional[pulumi.Input[str]] = None,
dir: Optional[pulumi.Input[str]] = None,
environment: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
interpreter: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
stdin: Optional[pulumi.Input[str]] = None,
triggers: Optional[pulumi.Input[Sequence[Any]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = CommandArgs.__new__(CommandArgs)
__props__.__dict__["create"] = create
__props__.__dict__["delete"] = delete
__props__.__dict__["dir"] = dir
__props__.__dict__["environment"] = environment
__props__.__dict__["interpreter"] = interpreter
__props__.__dict__["stdin"] = stdin
__props__.__dict__["triggers"] = triggers
__props__.__dict__["stderr"] = None
__props__.__dict__["stdout"] = None
super(Command, __self__).__init__(
'command:local:Command',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Command':
"""
Get an existing Command resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = CommandArgs.__new__(CommandArgs)
__props__.__dict__["create"] = None
__props__.__dict__["delete"] = None
__props__.__dict__["dir"] = None
__props__.__dict__["environment"] = None
__props__.__dict__["interpreter"] = None
__props__.__dict__["stderr"] = None
__props__.__dict__["stdin"] = None
__props__.__dict__["stdout"] = None
__props__.__dict__["triggers"] = None
return Command(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def create(self) -> pulumi.Output[Optional[str]]:
"""
The command to run on create.
"""
return pulumi.get(self, "create")
@property
@pulumi.getter
def delete(self) -> pulumi.Output[Optional[str]]:
"""
The command to run on delete.
"""
return pulumi.get(self, "delete")
@property
@pulumi.getter
def dir(self) -> pulumi.Output[Optional[str]]:
"""
        The directory from which to run the command. If `dir` does not exist, then
`Command` will fail.
"""
return pulumi.get(self, "dir")
@property
@pulumi.getter
def environment(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Additional environment variables available to the command's process.
"""
return pulumi.get(self, "environment")
@property
@pulumi.getter
def interpreter(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The program and arguments to run the command.
For example: `["/bin/sh", "-c"]`
"""
return pulumi.get(self, "interpreter")
@property
@pulumi.getter
def stderr(self) -> pulumi.Output[str]:
"""
The standard error of the command's process
"""
return pulumi.get(self, "stderr")
@property
@pulumi.getter
def stdin(self) -> pulumi.Output[Optional[str]]:
"""
Pass a string to the command's process as standard in
"""
return pulumi.get(self, "stdin")
@property
@pulumi.getter
def stdout(self) -> pulumi.Output[str]:
"""
The standard output of the command's process
"""
return pulumi.get(self, "stdout")
@property
@pulumi.getter
def triggers(self) -> pulumi.Output[Optional[Sequence[Any]]]:
"""
Trigger replacements on changes to this input.
"""
return pulumi.get(self, "triggers")
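# A minimal usage sketch (not part of the generated SDK); it is only meaningful
# inside a running Pulumi program (``pulumi up``). The resource name and shell
# commands below are illustrative.
def _example_command_usage():
    cmd = Command(
        "write-timestamp",
        create="date > timestamp.txt",   # runs when the resource is created
        delete="rm -f timestamp.txt",    # runs when the resource is deleted
        environment={"TZ": "UTC"},
    )
    # stdout/stderr of the create command are exposed as resource outputs.
    pulumi.export("create_stdout", cmd.stdout)
    return cmd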
| 39.734568 | 142 | 0.615659 |

4a192a317785a1947a04a588328a5979d7df9f47 | 89,128 | py | Python | venv/lib/python3.9/site-packages/pyarrow/parquet.py | CMU-IDS-2022/final-project-the-evaluators | 3b9262ad1a0f7315208a94a05ea1ce38e679d01d | ["BSD-3-Clause"] | 1 | 2022-01-17T02:58:50.000Z | 2022-01-17T02:58:50.000Z | venv/lib/python3.9/site-packages/pyarrow/parquet.py | CMU-IDS-2022/final-project-the-evaluators | 3b9262ad1a0f7315208a94a05ea1ce38e679d01d | ["BSD-3-Clause"] | 2 | 2021-12-03T08:48:53.000Z | 2021-12-06T16:42:29.000Z | venv/lib/python3.9/site-packages/pyarrow/parquet.py | CMU-IDS-2022/final-project-the-evaluators | 3b9262ad1a0f7315208a94a05ea1ce38e679d01d | ["BSD-3-Clause"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from concurrent import futures
from functools import partial, reduce
import json
from collections.abc import Collection
import numpy as np
import os
import re
import operator
import urllib.parse
import warnings
import pyarrow as pa
import pyarrow.lib as lib
import pyarrow._parquet as _parquet
from pyarrow._parquet import (ParquetReader, Statistics, # noqa
FileMetaData, RowGroupMetaData,
ColumnChunkMetaData,
ParquetSchema, ColumnSchema)
from pyarrow.fs import (LocalFileSystem, FileSystem,
_resolve_filesystem_and_path, _ensure_filesystem)
from pyarrow import filesystem as legacyfs
from pyarrow.util import guid, _is_path_like, _stringify_path
_URI_STRIP_SCHEMES = ('hdfs',)
def _parse_uri(path):
path = _stringify_path(path)
parsed_uri = urllib.parse.urlparse(path)
if parsed_uri.scheme in _URI_STRIP_SCHEMES:
return parsed_uri.path
else:
# ARROW-4073: On Windows returning the path with the scheme
# stripped removes the drive letter, if any
return path
def _get_filesystem_and_path(passed_filesystem, path):
if passed_filesystem is None:
return legacyfs.resolve_filesystem_and_path(path, passed_filesystem)
else:
passed_filesystem = legacyfs._ensure_filesystem(passed_filesystem)
parsed_path = _parse_uri(path)
return passed_filesystem, parsed_path
def _check_contains_null(val):
if isinstance(val, bytes):
for byte in val:
if isinstance(byte, bytes):
compare_to = chr(0)
else:
compare_to = 0
if byte == compare_to:
return True
elif isinstance(val, str):
return '\x00' in val
return False
def _check_filters(filters, check_null_strings=True):
"""
Check if filters are well-formed.
"""
if filters is not None:
if len(filters) == 0 or any(len(f) == 0 for f in filters):
raise ValueError("Malformed filters")
if isinstance(filters[0][0], str):
# We have encountered the situation where we have one nesting level
# too few:
# We have [(,,), ..] instead of [[(,,), ..]]
filters = [filters]
if check_null_strings:
for conjunction in filters:
for col, op, val in conjunction:
if (
isinstance(val, list) and
all(_check_contains_null(v) for v in val) or
_check_contains_null(val)
):
raise NotImplementedError(
"Null-terminated binary strings are not supported "
"as filter values."
)
return filters
_DNF_filter_doc = """Predicates are expressed in disjunctive normal form (DNF), like
``[[('x', '=', 0), ...], ...]``. DNF allows arbitrary boolean logical
combinations of single column predicates. The innermost tuples each
describe a single column predicate. The list of inner predicates is
interpreted as a conjunction (AND), forming a more selective,
multiple-column predicate. Finally, the outermost list combines these
filters as a disjunction (OR).
Predicates may also be passed as List[Tuple]. This form is interpreted
as a single conjunction. To express OR in predicates, one must
use the (preferred) List[List[Tuple]] notation.
Each tuple has format: (``key``, ``op``, ``value``) and compares the
``key`` with the ``value``.
The supported ``op`` are: ``=`` or ``==``, ``!=``, ``<``, ``>``, ``<=``,
``>=``, ``in`` and ``not in``. If the ``op`` is ``in`` or ``not in``, the
``value`` must be a collection such as a ``list``, a ``set`` or a
``tuple``.
Examples:
.. code-block:: python
('x', '=', 0)
('y', 'in', ['a', 'b', 'c'])
('z', 'not in', {'a','b'})
"""
def _filters_to_expression(filters):
"""
    Check that filters are well-formed and convert them to an Expression.
See _DNF_filter_doc above for more details.
"""
import pyarrow.dataset as ds
if isinstance(filters, ds.Expression):
return filters
filters = _check_filters(filters, check_null_strings=False)
def convert_single_predicate(col, op, val):
field = ds.field(col)
if op == "=" or op == "==":
return field == val
elif op == "!=":
return field != val
elif op == '<':
return field < val
elif op == '>':
return field > val
elif op == '<=':
return field <= val
elif op == '>=':
return field >= val
elif op == 'in':
return field.isin(val)
elif op == 'not in':
return ~field.isin(val)
else:
raise ValueError(
'"{0}" is not a valid operator in predicates.'.format(
(col, op, val)))
disjunction_members = []
for conjunction in filters:
conjunction_members = [
convert_single_predicate(col, op, val)
for col, op, val in conjunction
]
disjunction_members.append(reduce(operator.and_, conjunction_members))
return reduce(operator.or_, disjunction_members)
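# A minimal sketch (not part of pyarrow) of the DNF notation documented above:
# a list of tuples is a single conjunction, a list of lists is an OR of
# conjunctions, and the helper above turns either form into an Expression.
def _example_filters_to_expression():
    import pyarrow.dataset as ds
    expr = _filters_to_expression([[('x', '=', 0), ('y', 'in', ['a', 'b'])]])
    # Equivalent expression built by hand: (x == 0) AND (y in {'a', 'b'})
    manual = (ds.field('x') == 0) & (ds.field('y').isin(['a', 'b']))
    return expr, manual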
# ----------------------------------------------------------------------
# Reading a single Parquet file
class ParquetFile:
"""
Reader interface for a single Parquet file.
Parameters
----------
source : str, pathlib.Path, pyarrow.NativeFile, or file-like object
Readable source. For passing bytes or buffer-like file containing a
Parquet file, use pyarrow.BufferReader.
metadata : FileMetaData, default None
Use existing metadata object, rather than reading from file.
common_metadata : FileMetaData, default None
Will be used in reads for pandas schema metadata if not found in the
main file's metadata, no other uses at the moment.
memory_map : bool, default False
If the source is a file path, use a memory map to read file, which can
improve performance in some environments.
buffer_size : int, default 0
If positive, perform read buffering when deserializing individual
column chunks. Otherwise IO calls are unbuffered.
pre_buffer : bool, default False
Coalesce and issue file reads in parallel to improve performance on
high-latency filesystems (e.g. S3). If True, Arrow will use a
background I/O thread pool.
read_dictionary : list
List of column names to read directly as DictionaryArray.
coerce_int96_timestamp_unit : str, default None.
Cast timestamps that are stored in INT96 format to a particular
resolution (e.g. 'ms'). Setting to None is equivalent to 'ns'
        and therefore INT96 timestamps will be inferred as timestamps
in nanoseconds.
"""
def __init__(self, source, metadata=None, common_metadata=None,
read_dictionary=None, memory_map=False, buffer_size=0,
pre_buffer=False, coerce_int96_timestamp_unit=None):
self.reader = ParquetReader()
self.reader.open(
source, use_memory_map=memory_map,
buffer_size=buffer_size, pre_buffer=pre_buffer,
read_dictionary=read_dictionary, metadata=metadata,
coerce_int96_timestamp_unit=coerce_int96_timestamp_unit
)
self.common_metadata = common_metadata
self._nested_paths_by_prefix = self._build_nested_paths()
def _build_nested_paths(self):
paths = self.reader.column_paths
result = defaultdict(list)
for i, path in enumerate(paths):
key = path[0]
rest = path[1:]
while True:
result[key].append(i)
if not rest:
break
key = '.'.join((key, rest[0]))
rest = rest[1:]
return result
@property
def metadata(self):
return self.reader.metadata
@property
def schema(self):
"""
Return the Parquet schema, unconverted to Arrow types
"""
return self.metadata.schema
@property
def schema_arrow(self):
"""
Return the inferred Arrow schema, converted from the whole Parquet
file's schema
"""
return self.reader.schema_arrow
@property
def num_row_groups(self):
return self.reader.num_row_groups
def read_row_group(self, i, columns=None, use_threads=True,
use_pandas_metadata=False):
"""
Read a single row group from a Parquet file.
Parameters
----------
i : int
Index of the individual row group that we want to read.
columns : list
If not None, only these columns will be read from the row group. A
column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
use_threads : bool, default True
Perform multi-threaded column reads.
use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.
Returns
-------
pyarrow.table.Table
Content of the row group as a table (of columns)
"""
column_indices = self._get_column_indices(
columns, use_pandas_metadata=use_pandas_metadata)
return self.reader.read_row_group(i, column_indices=column_indices,
use_threads=use_threads)
def read_row_groups(self, row_groups, columns=None, use_threads=True,
use_pandas_metadata=False):
"""
        Read multiple row groups from a Parquet file.
Parameters
----------
row_groups : list
Only these row groups will be read from the file.
columns : list
If not None, only these columns will be read from the row group. A
column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
use_threads : bool, default True
Perform multi-threaded column reads.
use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.
Returns
-------
pyarrow.table.Table
Content of the row groups as a table (of columns).
"""
column_indices = self._get_column_indices(
columns, use_pandas_metadata=use_pandas_metadata)
return self.reader.read_row_groups(row_groups,
column_indices=column_indices,
use_threads=use_threads)
def iter_batches(self, batch_size=65536, row_groups=None, columns=None,
use_threads=True, use_pandas_metadata=False):
"""
Read streaming batches from a Parquet file
Parameters
----------
batch_size : int, default 64K
Maximum number of records to yield per batch. Batches may be
smaller if there aren't enough rows in the file.
row_groups : list
Only these row groups will be read from the file.
columns : list
If not None, only these columns will be read from the file. A
column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
use_threads : boolean, default True
Perform multi-threaded column reads.
use_pandas_metadata : boolean, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.
Returns
-------
iterator of pyarrow.RecordBatch
Contents of each batch as a record batch
"""
if row_groups is None:
row_groups = range(0, self.metadata.num_row_groups)
column_indices = self._get_column_indices(
columns, use_pandas_metadata=use_pandas_metadata)
batches = self.reader.iter_batches(batch_size,
row_groups=row_groups,
column_indices=column_indices,
use_threads=use_threads)
return batches
def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
"""
        Read a Table from Parquet format.
Parameters
----------
columns : list
If not None, only these columns will be read from the file. A
column name may be a prefix of a nested field, e.g. 'a' will select
'a.b', 'a.c', and 'a.d.e'.
use_threads : bool, default True
Perform multi-threaded column reads.
use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.
Returns
-------
pyarrow.table.Table
Content of the file as a table (of columns).
"""
column_indices = self._get_column_indices(
columns, use_pandas_metadata=use_pandas_metadata)
return self.reader.read_all(column_indices=column_indices,
use_threads=use_threads)
def scan_contents(self, columns=None, batch_size=65536):
"""
Read contents of file for the given columns and batch size.
Notes
-----
This function's primary purpose is benchmarking.
The scan is executed on a single thread.
Parameters
----------
columns : list of integers, default None
Select columns to read, if None scan all columns.
batch_size : int, default 64K
Number of rows to read at a time internally.
Returns
-------
num_rows : number of rows in file
"""
column_indices = self._get_column_indices(columns)
return self.reader.scan_contents(column_indices,
batch_size=batch_size)
def _get_column_indices(self, column_names, use_pandas_metadata=False):
if column_names is None:
return None
indices = []
for name in column_names:
if name in self._nested_paths_by_prefix:
indices.extend(self._nested_paths_by_prefix[name])
if use_pandas_metadata:
file_keyvalues = self.metadata.metadata
common_keyvalues = (self.common_metadata.metadata
if self.common_metadata is not None
else None)
if file_keyvalues and b'pandas' in file_keyvalues:
index_columns = _get_pandas_index_columns(file_keyvalues)
elif common_keyvalues and b'pandas' in common_keyvalues:
index_columns = _get_pandas_index_columns(common_keyvalues)
else:
index_columns = []
if indices is not None and index_columns:
indices += [self.reader.column_name_idx(descr)
for descr in index_columns
if not isinstance(descr, dict)]
return indices
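# A minimal sketch (not part of pyarrow) of the reader above: write a small
# table to an in-memory buffer, then read it back through ParquetFile, either
# whole or as streaming batches.
def _example_parquetfile_roundtrip():
    table = pa.table({'x': [1, 2, 3], 'y': ['a', 'b', 'c']})
    sink = pa.BufferOutputStream()
    with ParquetWriter(sink, table.schema) as writer:
        writer.write_table(table)
    pf = ParquetFile(pa.BufferReader(sink.getvalue()))
    batches = list(pf.iter_batches(batch_size=2))  # streaming reads
    return pf.read(columns=['x']), batches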
_SPARK_DISALLOWED_CHARS = re.compile('[ ,;{}()\n\t=]')
def _sanitized_spark_field_name(name):
return _SPARK_DISALLOWED_CHARS.sub('_', name)
def _sanitize_schema(schema, flavor):
if 'spark' in flavor:
sanitized_fields = []
schema_changed = False
for field in schema:
name = field.name
sanitized_name = _sanitized_spark_field_name(name)
if sanitized_name != name:
schema_changed = True
sanitized_field = pa.field(sanitized_name, field.type,
field.nullable, field.metadata)
sanitized_fields.append(sanitized_field)
else:
sanitized_fields.append(field)
new_schema = pa.schema(sanitized_fields, metadata=schema.metadata)
return new_schema, schema_changed
else:
return schema, False
def _sanitize_table(table, new_schema, flavor):
# TODO: This will not handle prohibited characters in nested field names
if 'spark' in flavor:
column_data = [table[i] for i in range(table.num_columns)]
return pa.Table.from_arrays(column_data, schema=new_schema)
else:
return table
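# A minimal sketch (not part of pyarrow): with flavor='spark', field names
# containing characters Spark disallows are rewritten with underscores.
def _example_spark_schema_sanitization():
    schema = pa.schema([('my col', pa.int32()), ('ok', pa.string())])
    new_schema, changed = _sanitize_schema(schema, flavor='spark')
    return changed, new_schema.names  # -> True, ['my_col', 'ok']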
_parquet_writer_arg_docs = """version : {"1.0", "2.4", "2.6"}, default "1.0"
Determine which Parquet logical types are available for use, whether the
reduced set from the Parquet 1.x.x format or the expanded logical types
added in later format versions.
Files written with version='2.4' or '2.6' may not be readable in all
Parquet implementations, so version='1.0' is likely the choice that
maximizes file compatibility.
UINT32 and some logical types are only available with version '2.4'.
Nanosecond timestamps are only available with version '2.6'.
Other features such as compression algorithms or the new serialized
data page format must be enabled separately (see 'compression' and
'data_page_version').
use_dictionary : bool or list
Specify if we should use dictionary encoding in general or only for
some columns.
use_deprecated_int96_timestamps : bool, default None
Write timestamps to INT96 Parquet format. Defaults to False unless enabled
    by the flavor argument. This takes priority over the coerce_timestamps option.
coerce_timestamps : str, default None
Cast timestamps to a particular resolution. If omitted, defaults are chosen
depending on `version`. By default, for ``version='1.0'`` (the default)
and ``version='2.4'``, nanoseconds are cast to microseconds ('us'), while
for other `version` values, they are written natively without loss
of resolution. Seconds are always cast to milliseconds ('ms') by default,
as Parquet does not have any temporal type with seconds resolution.
If the casting results in loss of data, it will raise an exception
unless ``allow_truncated_timestamps=True`` is given.
Valid values: {None, 'ms', 'us'}
data_page_size : int, default None
Set a target threshold for the approximate encoded size of data
pages within a column chunk (in bytes). If None, use the default data page
size of 1MByte.
allow_truncated_timestamps : bool, default False
Allow loss of data when coercing timestamps to a particular
resolution. E.g. if microsecond or nanosecond data is lost when coercing to
    'ms', do not raise an exception. Passing ``allow_truncated_timestamps=True``
will NOT result in the truncation exception being ignored unless
``coerce_timestamps`` is not None.
compression : str or dict
Specify the compression codec, either on a general basis or per-column.
Valid values: {'NONE', 'SNAPPY', 'GZIP', 'BROTLI', 'LZ4', 'ZSTD'}.
write_statistics : bool or list
Specify if we should write statistics in general (default is True) or only
for some columns.
flavor : {'spark'}, default None
Sanitize schema or set other compatibility options to work with
various target systems.
filesystem : FileSystem, default None
If nothing passed, will be inferred from `where` if path-like, else
`where` is already a file-like object so no filesystem is needed.
compression_level : int or dict, default None
Specify the compression level for a codec, either on a general basis or
per-column. If None is passed, arrow selects the compression level for
the compression codec in use. The compression level has a different
meaning for each codec, so you have to read the documentation of the
codec you are using.
An exception is thrown if the compression codec does not allow specifying
a compression level.
use_byte_stream_split : bool or list, default False
Specify if the byte_stream_split encoding should be used in general or
    only for some columns. If both dictionary and byte_stream_split are
enabled, then dictionary is preferred.
The byte_stream_split encoding is valid only for floating-point data types
and should be combined with a compression codec.
column_encoding : string or dict, default None
Specify the encoding scheme on a per column basis.
Currently supported values: {'PLAIN', 'BYTE_STREAM_SPLIT'}.
Certain encodings are only compatible with certain data types.
Please refer to the encodings section of `Reading and writing Parquet
files <https://arrow.apache.org/docs/cpp/parquet.html#encodings>`_.
data_page_version : {"1.0", "2.0"}, default "1.0"
The serialized Parquet data page format version to write, defaults to
1.0. This does not impact the file schema logical types and Arrow to
Parquet type casting behavior; for that use the "version" option.
use_compliant_nested_type : bool, default False
Whether to write compliant Parquet nested type (lists) as defined
`here <https://github.com/apache/parquet-format/blob/master/
LogicalTypes.md#nested-types>`_, defaults to ``False``.
For ``use_compliant_nested_type=True``, this will write into a list
with 3-level structure where the middle level, named ``list``,
is a repeated group with a single field named ``element``::
<list-repetition> group <name> (LIST) {
repeated group list {
<element-repetition> <element-type> element;
}
}
For ``use_compliant_nested_type=False``, this will also write into a list
with 3-level structure, where the name of the single field of the middle
level ``list`` is taken from the element name for nested columns in Arrow,
which defaults to ``item``::
<list-repetition> group <name> (LIST) {
repeated group list {
<element-repetition> <element-type> item;
}
}
"""
class ParquetWriter:
__doc__ = """
Class for incrementally building a Parquet file for Arrow tables.
Parameters
----------
where : path or file-like object
schema : pyarrow.Schema
{}
writer_engine_version : unused
**options : dict
If options contains a key `metadata_collector` then the
corresponding value is assumed to be a list (or any object with
`.append` method) that will be filled with the file metadata instance
of the written file.
""".format(_parquet_writer_arg_docs)
def __init__(self, where, schema, filesystem=None,
flavor=None,
version='1.0',
use_dictionary=True,
compression='snappy',
write_statistics=True,
use_deprecated_int96_timestamps=None,
compression_level=None,
use_byte_stream_split=False,
column_encoding=None,
writer_engine_version=None,
data_page_version='1.0',
use_compliant_nested_type=False,
**options):
if use_deprecated_int96_timestamps is None:
# Use int96 timestamps for Spark
if flavor is not None and 'spark' in flavor:
use_deprecated_int96_timestamps = True
else:
use_deprecated_int96_timestamps = False
self.flavor = flavor
if flavor is not None:
schema, self.schema_changed = _sanitize_schema(schema, flavor)
else:
self.schema_changed = False
self.schema = schema
self.where = where
# If we open a file using a filesystem, store file handle so we can be
# sure to close it when `self.close` is called.
self.file_handle = None
filesystem, path = _resolve_filesystem_and_path(
where, filesystem, allow_legacy_filesystem=True
)
if filesystem is not None:
if isinstance(filesystem, legacyfs.FileSystem):
# legacy filesystem (eg custom subclass)
# TODO deprecate
sink = self.file_handle = filesystem.open(path, 'wb')
else:
# ARROW-10480: do not auto-detect compression. While
# a filename like foo.parquet.gz is nonconforming, it
# shouldn't implicitly apply compression.
sink = self.file_handle = filesystem.open_output_stream(
path, compression=None)
else:
sink = where
self._metadata_collector = options.pop('metadata_collector', None)
engine_version = 'V2'
self.writer = _parquet.ParquetWriter(
sink, schema,
version=version,
compression=compression,
use_dictionary=use_dictionary,
write_statistics=write_statistics,
use_deprecated_int96_timestamps=use_deprecated_int96_timestamps,
compression_level=compression_level,
use_byte_stream_split=use_byte_stream_split,
column_encoding=column_encoding,
writer_engine_version=engine_version,
data_page_version=data_page_version,
use_compliant_nested_type=use_compliant_nested_type,
**options)
self.is_open = True
def __del__(self):
if getattr(self, 'is_open', False):
self.close()
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
# return false since we want to propagate exceptions
return False
def write(self, table_or_batch, row_group_size=None):
"""
Write RecordBatch or Table to the Parquet file.
Parameters
----------
table_or_batch : {RecordBatch, Table}
row_group_size : int, default None
Maximum size of each written row group. If None, the
row group size will be the minimum of the input
table or batch length and 64 * 1024 * 1024.
"""
if isinstance(table_or_batch, pa.RecordBatch):
self.write_batch(table_or_batch, row_group_size)
elif isinstance(table_or_batch, pa.Table):
self.write_table(table_or_batch, row_group_size)
else:
raise TypeError(type(table_or_batch))
def write_batch(self, batch, row_group_size=None):
"""
Write RecordBatch to the Parquet file.
Parameters
----------
batch : RecordBatch
row_group_size : int, default None
Maximum size of each written row group. If None, the
row group size will be the minimum of the RecordBatch
size and 64 * 1024 * 1024.
"""
table = pa.Table.from_batches([batch], batch.schema)
self.write_table(table, row_group_size)
def write_table(self, table, row_group_size=None):
"""
Write Table to the Parquet file.
Parameters
----------
table : Table
row_group_size : int, default None
Maximum size of each written row group. If None, the
row group size will be the minimum of the Table size
and 64 * 1024 * 1024.
"""
if self.schema_changed:
table = _sanitize_table(table, self.schema, self.flavor)
assert self.is_open
if not table.schema.equals(self.schema, check_metadata=False):
msg = ('Table schema does not match schema used to create file: '
'\ntable:\n{!s} vs. \nfile:\n{!s}'
.format(table.schema, self.schema))
raise ValueError(msg)
self.writer.write_table(table, row_group_size=row_group_size)
def close(self):
if self.is_open:
self.writer.close()
self.is_open = False
if self._metadata_collector is not None:
self._metadata_collector.append(self.writer.metadata)
if self.file_handle is not None:
self.file_handle.close()
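# A minimal sketch (not part of pyarrow) of incremental writing: each call to
# write_table() below appends its own row group to the same in-memory file.
def _example_incremental_write():
    schema = pa.schema([('x', pa.int64())])
    sink = pa.BufferOutputStream()
    with ParquetWriter(sink, schema) as writer:
        writer.write_table(pa.table({'x': [1, 2]}, schema=schema))
        writer.write_table(pa.table({'x': [3, 4]}, schema=schema))
    # Two small tables written separately -> two row groups in the file.
    return ParquetFile(pa.BufferReader(sink.getvalue())).num_row_groups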
def _get_pandas_index_columns(keyvalues):
return (json.loads(keyvalues[b'pandas'].decode('utf8'))
['index_columns'])
# ----------------------------------------------------------------------
# Metadata container providing instructions about reading a single Parquet
# file, possibly part of a partitioned dataset
class ParquetDatasetPiece:
"""
DEPRECATED: A single chunk of a potentially larger Parquet dataset to read.
The arguments will indicate to read either a single row group or all row
groups, and whether to add partition keys to the resulting pyarrow.Table.
.. deprecated:: 5.0
Directly constructing a ``ParquetDatasetPiece`` is deprecated, as well
as accessing the pieces of a ``ParquetDataset`` object. Specify
``use_legacy_dataset=False`` when constructing the ``ParquetDataset``
and use the ``ParquetDataset.fragments`` attribute instead.
Parameters
----------
path : str or pathlib.Path
Path to file in the file system where this piece is located.
open_file_func : callable
Function to use for obtaining file handle to dataset piece.
partition_keys : list of tuples
Two-element tuples of ``(column name, ordinal index)``.
row_group : int, default None
Row group to load. By default, reads all row groups.
file_options : dict
        Extra keyword arguments passed to ParquetFile when opening the piece.
"""
def __init__(self, path, open_file_func=partial(open, mode='rb'),
file_options=None, row_group=None, partition_keys=None):
warnings.warn(
"ParquetDatasetPiece is deprecated as of pyarrow 5.0.0 and will "
"be removed in a future version.",
DeprecationWarning, stacklevel=2)
self._init(
path, open_file_func, file_options, row_group, partition_keys)
@staticmethod
def _create(path, open_file_func=partial(open, mode='rb'),
file_options=None, row_group=None, partition_keys=None):
self = ParquetDatasetPiece.__new__(ParquetDatasetPiece)
self._init(
path, open_file_func, file_options, row_group, partition_keys)
return self
def _init(self, path, open_file_func, file_options, row_group,
partition_keys):
self.path = _stringify_path(path)
self.open_file_func = open_file_func
self.row_group = row_group
self.partition_keys = partition_keys or []
self.file_options = file_options or {}
def __eq__(self, other):
if not isinstance(other, ParquetDatasetPiece):
return False
return (self.path == other.path and
self.row_group == other.row_group and
self.partition_keys == other.partition_keys)
def __repr__(self):
return ('{}({!r}, row_group={!r}, partition_keys={!r})'
.format(type(self).__name__, self.path,
self.row_group,
self.partition_keys))
def __str__(self):
result = ''
if len(self.partition_keys) > 0:
partition_str = ', '.join('{}={}'.format(name, index)
for name, index in self.partition_keys)
result += 'partition[{}] '.format(partition_str)
result += self.path
if self.row_group is not None:
result += ' | row_group={}'.format(self.row_group)
return result
def get_metadata(self):
"""
Return the file's metadata.
Returns
-------
metadata : FileMetaData
"""
f = self.open()
return f.metadata
def open(self):
"""
Return instance of ParquetFile.
"""
reader = self.open_file_func(self.path)
if not isinstance(reader, ParquetFile):
reader = ParquetFile(reader, **self.file_options)
return reader
def read(self, columns=None, use_threads=True, partitions=None,
file=None, use_pandas_metadata=False):
"""
Read this piece as a pyarrow.Table.
Parameters
----------
columns : list of column names, default None
use_threads : bool, default True
Perform multi-threaded column reads.
partitions : ParquetPartitions, default None
file : file-like object
Passed to ParquetFile.
use_pandas_metadata : bool
If pandas metadata should be used or not.
Returns
-------
table : pyarrow.Table
"""
if self.open_file_func is not None:
reader = self.open()
elif file is not None:
reader = ParquetFile(file, **self.file_options)
else:
# try to read the local path
reader = ParquetFile(self.path, **self.file_options)
options = dict(columns=columns,
use_threads=use_threads,
use_pandas_metadata=use_pandas_metadata)
if self.row_group is not None:
table = reader.read_row_group(self.row_group, **options)
else:
table = reader.read(**options)
if len(self.partition_keys) > 0:
if partitions is None:
raise ValueError('Must pass partition sets')
# Here, the index is the categorical code of the partition where
# this piece is located. Suppose we had
#
# /foo=a/0.parq
# /foo=b/0.parq
# /foo=c/0.parq
#
# Then we assign a=0, b=1, c=2. And the resulting Table pieces will
# have a DictionaryArray column named foo having the constant index
# value as indicated. The distinct categories of the partition have
# been computed in the ParquetManifest
for i, (name, index) in enumerate(self.partition_keys):
# The partition code is the same for all values in this piece
indices = np.full(len(table), index, dtype='i4')
# This is set of all partition values, computed as part of the
# manifest, so ['a', 'b', 'c'] as in our example above.
dictionary = partitions.levels[i].dictionary
arr = pa.DictionaryArray.from_arrays(indices, dictionary)
table = table.append_column(name, arr)
return table
class PartitionSet:
"""
A data structure for cataloguing the observed Parquet partitions at a
particular level. So if we have
/foo=a/bar=0
/foo=a/bar=1
/foo=a/bar=2
/foo=b/bar=0
/foo=b/bar=1
/foo=b/bar=2
Then we have two partition sets, one for foo, another for bar. As we visit
levels of the partition hierarchy, a PartitionSet tracks the distinct
values and assigns categorical codes to use when reading the pieces
Parameters
----------
name : str
Name of the partition set. Under which key to collect all values.
keys : list
All possible values that have been collected for that partition set.
"""
def __init__(self, name, keys=None):
self.name = name
self.keys = keys or []
self.key_indices = {k: i for i, k in enumerate(self.keys)}
self._dictionary = None
def get_index(self, key):
"""
Get the index of the partition value if it is known, otherwise assign
one
Parameters
----------
        key : The value for which we want to know the index.
"""
if key in self.key_indices:
return self.key_indices[key]
else:
index = len(self.key_indices)
self.keys.append(key)
self.key_indices[key] = index
return index
@property
def dictionary(self):
if self._dictionary is not None:
return self._dictionary
if len(self.keys) == 0:
raise ValueError('No known partition keys')
# Only integer and string partition types are supported right now
try:
integer_keys = [int(x) for x in self.keys]
dictionary = lib.array(integer_keys)
except ValueError:
dictionary = lib.array(self.keys)
self._dictionary = dictionary
return dictionary
@property
def is_sorted(self):
return list(self.keys) == sorted(self.keys)
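# A minimal sketch (not part of pyarrow): distinct partition values observed at
# a level receive stable categorical codes, and ``dictionary`` collects the
# distinct values in first-seen order.
def _example_partition_set():
    part_set = PartitionSet('foo')
    codes = [part_set.get_index(v) for v in ['a', 'b', 'a', 'c']]
    return codes, part_set.dictionary  # -> [0, 1, 0, 2], array(['a', 'b', 'c'])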
class ParquetPartitions:
def __init__(self):
self.levels = []
self.partition_names = set()
def __len__(self):
return len(self.levels)
def __getitem__(self, i):
return self.levels[i]
def equals(self, other):
if not isinstance(other, ParquetPartitions):
raise TypeError('`other` must be an instance of ParquetPartitions')
return (self.levels == other.levels and
self.partition_names == other.partition_names)
def __eq__(self, other):
try:
return self.equals(other)
except TypeError:
return NotImplemented
def get_index(self, level, name, key):
"""
Record a partition value at a particular level, returning the distinct
code for that value at that level.
Examples
--------
partitions.get_index(1, 'foo', 'a') returns 0
partitions.get_index(1, 'foo', 'b') returns 1
partitions.get_index(1, 'foo', 'c') returns 2
partitions.get_index(1, 'foo', 'a') returns 0
Parameters
----------
level : int
The nesting level of the partition we are observing
name : str
The partition name
key : str or int
The partition value
"""
if level == len(self.levels):
if name in self.partition_names:
raise ValueError('{} was the name of the partition in '
'another level'.format(name))
part_set = PartitionSet(name)
self.levels.append(part_set)
self.partition_names.add(name)
return self.levels[level].get_index(key)
def filter_accepts_partition(self, part_key, filter, level):
p_column, p_value_index = part_key
f_column, op, f_value = filter
if p_column != f_column:
return True
f_type = type(f_value)
if op in {'in', 'not in'}:
if not isinstance(f_value, Collection):
                raise TypeError(
                    "'%s' object is not a collection" % f_type.__name__)
if not f_value:
raise ValueError("Cannot use empty collection as filter value")
if len({type(item) for item in f_value}) != 1:
                raise ValueError("All elements of the collection '%s' must be"
                                 " of the same type" % f_value)
f_type = type(next(iter(f_value)))
elif not isinstance(f_value, str) and isinstance(f_value, Collection):
            raise ValueError(
                "Op '%s' not supported with a collection value" % op)
p_value = f_type(self.levels[level]
.dictionary[p_value_index].as_py())
if op == "=" or op == "==":
return p_value == f_value
elif op == "!=":
return p_value != f_value
elif op == '<':
return p_value < f_value
elif op == '>':
return p_value > f_value
elif op == '<=':
return p_value <= f_value
elif op == '>=':
return p_value >= f_value
elif op == 'in':
return p_value in f_value
elif op == 'not in':
return p_value not in f_value
else:
            raise ValueError("'%s' is not a valid operator in predicates."
                             % filter[1])
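# Illustrative sketch (not part of pyarrow): the DNF filter format evaluated by
# ParquetPartitions.filter_accepts_partition above. Column names and values are
# hypothetical; the outer list is OR-ed together, each inner list is AND-ed.
_EXAMPLE_DNF_FILTERS = [
    [('year', '=', 2020), ('month', 'in', {11, 12})],  # year == 2020 AND month in {11, 12}
    [('year', '>', 2020)],                             # OR year > 2020
]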
class ParquetManifest:
def __init__(self, dirpath, open_file_func=None, filesystem=None,
pathsep='/', partition_scheme='hive', metadata_nthreads=1):
filesystem, dirpath = _get_filesystem_and_path(filesystem, dirpath)
self.filesystem = filesystem
self.open_file_func = open_file_func
self.pathsep = pathsep
self.dirpath = _stringify_path(dirpath)
self.partition_scheme = partition_scheme
self.partitions = ParquetPartitions()
self.pieces = []
self._metadata_nthreads = metadata_nthreads
self._thread_pool = futures.ThreadPoolExecutor(
max_workers=metadata_nthreads)
self.common_metadata_path = None
self.metadata_path = None
self._visit_level(0, self.dirpath, [])
        # Due to concurrency, pieces will potentially be out of order if the
        # dataset is partitioned, so we sort them to yield stable results.
self.pieces.sort(key=lambda piece: piece.path)
if self.common_metadata_path is None:
# _common_metadata is a subset of _metadata
self.common_metadata_path = self.metadata_path
self._thread_pool.shutdown()
def _visit_level(self, level, base_path, part_keys):
fs = self.filesystem
_, directories, files = next(fs.walk(base_path))
filtered_files = []
for path in files:
full_path = self.pathsep.join((base_path, path))
if path.endswith('_common_metadata'):
self.common_metadata_path = full_path
elif path.endswith('_metadata'):
self.metadata_path = full_path
elif self._should_silently_exclude(path):
continue
else:
filtered_files.append(full_path)
# ARROW-1079: Filter out "private" directories starting with underscore
filtered_directories = [self.pathsep.join((base_path, x))
for x in directories
if not _is_private_directory(x)]
filtered_files.sort()
filtered_directories.sort()
if len(filtered_files) > 0 and len(filtered_directories) > 0:
raise ValueError('Found files in an intermediate '
'directory: {}'.format(base_path))
elif len(filtered_directories) > 0:
self._visit_directories(level, filtered_directories, part_keys)
else:
self._push_pieces(filtered_files, part_keys)
def _should_silently_exclude(self, file_name):
return (file_name.endswith('.crc') or # Checksums
file_name.endswith('_$folder$') or # HDFS directories in S3
file_name.startswith('.') or # Hidden files starting with .
file_name.startswith('_') or # Hidden files starting with _
file_name in EXCLUDED_PARQUET_PATHS)
def _visit_directories(self, level, directories, part_keys):
futures_list = []
for path in directories:
head, tail = _path_split(path, self.pathsep)
name, key = _parse_hive_partition(tail)
index = self.partitions.get_index(level, name, key)
dir_part_keys = part_keys + [(name, index)]
# If you have less threads than levels, the wait call will block
# indefinitely due to multiple waits within a thread.
if level < self._metadata_nthreads:
future = self._thread_pool.submit(self._visit_level,
level + 1,
path,
dir_part_keys)
futures_list.append(future)
else:
self._visit_level(level + 1, path, dir_part_keys)
if futures_list:
futures.wait(futures_list)
def _parse_partition(self, dirname):
if self.partition_scheme == 'hive':
return _parse_hive_partition(dirname)
else:
raise NotImplementedError('partition schema: {}'
.format(self.partition_scheme))
def _push_pieces(self, files, part_keys):
self.pieces.extend([
ParquetDatasetPiece._create(path, partition_keys=part_keys,
open_file_func=self.open_file_func)
for path in files
])
def _parse_hive_partition(value):
if '=' not in value:
raise ValueError('Directory name did not appear to be a '
'partition: {}'.format(value))
return value.split('=', 1)
def _is_private_directory(x):
_, tail = os.path.split(x)
return (tail.startswith('_') or tail.startswith('.')) and '=' not in tail
def _path_split(path, sep):
i = path.rfind(sep) + 1
head, tail = path[:i], path[i:]
head = head.rstrip(sep)
return head, tail
EXCLUDED_PARQUET_PATHS = {'_SUCCESS'}
class _ParquetDatasetMetadata:
__slots__ = ('fs', 'memory_map', 'read_dictionary', 'common_metadata',
'buffer_size')
def _open_dataset_file(dataset, path, meta=None):
if (dataset.fs is not None and
not isinstance(dataset.fs, legacyfs.LocalFileSystem)):
path = dataset.fs.open(path, mode='rb')
return ParquetFile(
path,
metadata=meta,
memory_map=dataset.memory_map,
read_dictionary=dataset.read_dictionary,
common_metadata=dataset.common_metadata,
buffer_size=dataset.buffer_size
)
_DEPR_MSG = (
"'{}' attribute is deprecated as of pyarrow 5.0.0 and will be removed "
"in a future version.{}"
)
_read_docstring_common = """\
read_dictionary : list, default None
List of names or column paths (for nested types) to read directly
as DictionaryArray. Only supported for BYTE_ARRAY storage. To read
a flat column as dictionary-encoded pass the column name. For
nested types, you must pass the full column "path", which could be
something like level1.level2.list.item. Refer to the Parquet
file's schema to obtain the paths.
memory_map : bool, default False
If the source is a file path, use a memory map to read file, which can
improve performance in some environments.
buffer_size : int, default 0
If positive, perform read buffering when deserializing individual
column chunks. Otherwise IO calls are unbuffered.
partitioning : pyarrow.dataset.Partitioning or str or list of str, \
default "hive"
The partitioning scheme for a partitioned dataset. The default of "hive"
assumes directory names with key=value pairs like "/year=2009/month=11".
In addition, a scheme like "/2009/11" is also supported, in which case
you need to specify the field names or a full schema. See the
``pyarrow.dataset.partitioning()`` function for more details."""
class ParquetDataset:
__doc__ = """
Encapsulates details of reading a complete Parquet dataset possibly
consisting of multiple files and partitions in subdirectories.
Parameters
----------
path_or_paths : str or List[str]
A directory name, single file name, or list of file names.
filesystem : FileSystem, default None
If nothing passed, paths assumed to be found in the local on-disk
filesystem.
metadata : pyarrow.parquet.FileMetaData
Use metadata obtained elsewhere to validate file schemas.
schema : pyarrow.parquet.Schema
Use schema obtained elsewhere to validate file schemas. Alternative to
metadata parameter.
split_row_groups : bool, default False
Divide files into pieces for each row group in the file.
validate_schema : bool, default True
Check that individual file schemas are all the same / compatible.
filters : List[Tuple] or List[List[Tuple]] or None (default)
Rows which do not match the filter predicate will be removed from scanned
data. Partition keys embedded in a nested directory structure will be
exploited to avoid loading files at all if they contain no matching rows.
If `use_legacy_dataset` is True, filters can only reference partition
keys and only a hive-style directory structure is supported. When
setting `use_legacy_dataset` to False, also within-file level filtering
and different partitioning schemes are supported.
{1}
metadata_nthreads : int, default 1
How many threads to allow the thread pool which is used to read the
dataset metadata. Increasing this is helpful to read partitioned
datasets.
{0}
use_legacy_dataset : bool, default True
Set to False to enable the new code path (experimental, using the
        new Arrow Dataset API). Among other things, this allows passing
        `filters` for all columns and not only the partition keys, and enables
        different partitioning schemes, etc.
pre_buffer : bool, default True
Coalesce and issue file reads in parallel to improve performance on
high-latency filesystems (e.g. S3). If True, Arrow will use a
background I/O thread pool. This option is only supported for
use_legacy_dataset=False. If using a filesystem layer that itself
performs readahead (e.g. fsspec's S3FS), disable readahead for best
results.
coerce_int96_timestamp_unit : str, default None.
Cast timestamps that are stored in INT96 format to a particular resolution
(e.g. 'ms'). Setting to None is equivalent to 'ns' and therefore INT96
        timestamps will be inferred as timestamps in nanoseconds.
""".format(_read_docstring_common, _DNF_filter_doc)
def __new__(cls, path_or_paths=None, filesystem=None, schema=None,
metadata=None, split_row_groups=False, validate_schema=True,
filters=None, metadata_nthreads=1, read_dictionary=None,
memory_map=False, buffer_size=0, partitioning="hive",
use_legacy_dataset=None, pre_buffer=True,
coerce_int96_timestamp_unit=None):
if use_legacy_dataset is None:
# if a new filesystem is passed -> default to new implementation
if isinstance(filesystem, FileSystem):
use_legacy_dataset = False
# otherwise the default is still True
else:
use_legacy_dataset = True
if not use_legacy_dataset:
return _ParquetDatasetV2(
path_or_paths, filesystem=filesystem,
filters=filters,
partitioning=partitioning,
read_dictionary=read_dictionary,
memory_map=memory_map,
buffer_size=buffer_size,
pre_buffer=pre_buffer,
coerce_int96_timestamp_unit=coerce_int96_timestamp_unit,
# unsupported keywords
schema=schema, metadata=metadata,
split_row_groups=split_row_groups,
validate_schema=validate_schema,
metadata_nthreads=metadata_nthreads
)
self = object.__new__(cls)
return self
def __init__(self, path_or_paths, filesystem=None, schema=None,
metadata=None, split_row_groups=False, validate_schema=True,
filters=None, metadata_nthreads=1, read_dictionary=None,
memory_map=False, buffer_size=0, partitioning="hive",
use_legacy_dataset=True, pre_buffer=True,
coerce_int96_timestamp_unit=None):
if partitioning != "hive":
raise ValueError(
'Only "hive" for hive-like partitioning is supported when '
'using use_legacy_dataset=True')
self._metadata = _ParquetDatasetMetadata()
a_path = path_or_paths
if isinstance(a_path, list):
a_path = a_path[0]
self._metadata.fs, _ = _get_filesystem_and_path(filesystem, a_path)
if isinstance(path_or_paths, list):
self.paths = [_parse_uri(path) for path in path_or_paths]
else:
self.paths = _parse_uri(path_or_paths)
self._metadata.read_dictionary = read_dictionary
self._metadata.memory_map = memory_map
self._metadata.buffer_size = buffer_size
(self._pieces,
self._partitions,
self.common_metadata_path,
self.metadata_path) = _make_manifest(
path_or_paths, self._fs, metadata_nthreads=metadata_nthreads,
open_file_func=partial(_open_dataset_file, self._metadata)
)
if self.common_metadata_path is not None:
with self._fs.open(self.common_metadata_path) as f:
self._metadata.common_metadata = read_metadata(
f,
memory_map=memory_map
)
else:
self._metadata.common_metadata = None
if metadata is None and self.metadata_path is not None:
with self._fs.open(self.metadata_path) as f:
self.metadata = read_metadata(f, memory_map=memory_map)
else:
self.metadata = metadata
self.schema = schema
self.split_row_groups = split_row_groups
if split_row_groups:
raise NotImplementedError("split_row_groups not yet implemented")
if filters is not None:
filters = _check_filters(filters)
self._filter(filters)
if validate_schema:
self.validate_schemas()
def equals(self, other):
if not isinstance(other, ParquetDataset):
raise TypeError('`other` must be an instance of ParquetDataset')
if self._fs.__class__ != other._fs.__class__:
return False
for prop in ('paths', '_pieces', '_partitions',
'common_metadata_path', 'metadata_path',
'common_metadata', 'metadata', 'schema',
'split_row_groups'):
if getattr(self, prop) != getattr(other, prop):
return False
for prop in ('memory_map', 'buffer_size'):
if getattr(self._metadata, prop) != getattr(other._metadata, prop):
return False
return True
def __eq__(self, other):
try:
return self.equals(other)
except TypeError:
return NotImplemented
def validate_schemas(self):
if self.metadata is None and self.schema is None:
if self.common_metadata is not None:
self.schema = self.common_metadata.schema
else:
self.schema = self._pieces[0].get_metadata().schema
elif self.schema is None:
self.schema = self.metadata.schema
# Verify schemas are all compatible
dataset_schema = self.schema.to_arrow_schema()
# Exclude the partition columns from the schema, they are provided
# by the path, not the DatasetPiece
if self._partitions is not None:
for partition_name in self._partitions.partition_names:
if dataset_schema.get_field_index(partition_name) != -1:
field_idx = dataset_schema.get_field_index(partition_name)
dataset_schema = dataset_schema.remove(field_idx)
for piece in self._pieces:
file_metadata = piece.get_metadata()
file_schema = file_metadata.schema.to_arrow_schema()
if not dataset_schema.equals(file_schema, check_metadata=False):
raise ValueError('Schema in {!s} was different. \n'
'{!s}\n\nvs\n\n{!s}'
.format(piece, file_schema,
dataset_schema))
def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
"""
Read multiple Parquet files as a single pyarrow.Table.
Parameters
----------
columns : List[str]
Names of columns to read from the file.
use_threads : bool, default True
Perform multi-threaded column reads
use_pandas_metadata : bool, default False
Passed through to each dataset piece.
Returns
-------
pyarrow.Table
Content of the file as a table (of columns).
"""
tables = []
for piece in self._pieces:
table = piece.read(columns=columns, use_threads=use_threads,
partitions=self._partitions,
use_pandas_metadata=use_pandas_metadata)
tables.append(table)
all_data = lib.concat_tables(tables)
if use_pandas_metadata:
# We need to ensure that this metadata is set in the Table's schema
# so that Table.to_pandas will construct pandas.DataFrame with the
# right index
common_metadata = self._get_common_pandas_metadata()
current_metadata = all_data.schema.metadata or {}
if common_metadata and b'pandas' not in current_metadata:
all_data = all_data.replace_schema_metadata({
b'pandas': common_metadata})
return all_data
def read_pandas(self, **kwargs):
"""
Read dataset including pandas metadata, if any. Other arguments passed
through to ParquetDataset.read, see docstring for further details.
Parameters
----------
**kwargs : optional
All additional options to pass to the reader.
Returns
-------
pyarrow.Table
Content of the file as a table (of columns).
"""
return self.read(use_pandas_metadata=True, **kwargs)
def _get_common_pandas_metadata(self):
if self.common_metadata is None:
return None
keyvalues = self.common_metadata.metadata
return keyvalues.get(b'pandas', None)
def _filter(self, filters):
accepts_filter = self._partitions.filter_accepts_partition
def one_filter_accepts(piece, filter):
return all(accepts_filter(part_key, filter, level)
for level, part_key in enumerate(piece.partition_keys))
def all_filters_accept(piece):
return any(all(one_filter_accepts(piece, f) for f in conjunction)
for conjunction in filters)
self._pieces = [p for p in self._pieces if all_filters_accept(p)]
@property
def pieces(self):
warnings.warn(
_DEPR_MSG.format(
"ParquetDataset.pieces",
" Specify 'use_legacy_dataset=False' while constructing the "
"ParquetDataset, and then use the '.fragments' attribute "
"instead."),
DeprecationWarning, stacklevel=2)
return self._pieces
@property
def partitions(self):
warnings.warn(
_DEPR_MSG.format(
"ParquetDataset.partitions",
" Specify 'use_legacy_dataset=False' while constructing the "
"ParquetDataset, and then use the '.partitioning' attribute "
"instead."),
DeprecationWarning, stacklevel=2)
return self._partitions
@property
def memory_map(self):
warnings.warn(
_DEPR_MSG.format("ParquetDataset.memory_map", ""),
DeprecationWarning, stacklevel=2)
return self._metadata.memory_map
@property
def read_dictionary(self):
warnings.warn(
_DEPR_MSG.format("ParquetDataset.read_dictionary", ""),
DeprecationWarning, stacklevel=2)
return self._metadata.read_dictionary
@property
def buffer_size(self):
warnings.warn(
_DEPR_MSG.format("ParquetDataset.buffer_size", ""),
DeprecationWarning, stacklevel=2)
return self._metadata.buffer_size
_fs = property(
operator.attrgetter('_metadata.fs')
)
@property
def fs(self):
warnings.warn(
_DEPR_MSG.format(
"ParquetDataset.fs",
" Specify 'use_legacy_dataset=False' while constructing the "
"ParquetDataset, and then use the '.filesystem' attribute "
"instead."),
DeprecationWarning, stacklevel=2)
return self._metadata.fs
common_metadata = property(
operator.attrgetter('_metadata.common_metadata')
)
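# Illustrative usage sketch (not part of pyarrow): reading a hive-partitioned
# directory through the legacy ParquetDataset path defined above. The directory
# name, column names and filter values are hypothetical.
def _example_legacy_dataset_read():
    dataset = ParquetDataset(
        'my_dataset/',                   # e.g. my_dataset/year=2020/month=11/...
        filters=[('year', '=', 2020)],   # prunes partition directories before reading
        use_legacy_dataset=True,
    )
    return dataset.read(columns=['value'], use_pandas_metadata=True)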
def _make_manifest(path_or_paths, fs, pathsep='/', metadata_nthreads=1,
open_file_func=None):
partitions = None
common_metadata_path = None
metadata_path = None
if isinstance(path_or_paths, list) and len(path_or_paths) == 1:
# Dask passes a directory as a list of length 1
path_or_paths = path_or_paths[0]
if _is_path_like(path_or_paths) and fs.isdir(path_or_paths):
manifest = ParquetManifest(path_or_paths, filesystem=fs,
open_file_func=open_file_func,
pathsep=getattr(fs, "pathsep", "/"),
metadata_nthreads=metadata_nthreads)
common_metadata_path = manifest.common_metadata_path
metadata_path = manifest.metadata_path
pieces = manifest.pieces
partitions = manifest.partitions
else:
if not isinstance(path_or_paths, list):
path_or_paths = [path_or_paths]
# List of paths
if len(path_or_paths) == 0:
raise ValueError('Must pass at least one file path')
pieces = []
for path in path_or_paths:
if not fs.isfile(path):
raise OSError('Passed non-file path: {}'
.format(path))
piece = ParquetDatasetPiece._create(
path, open_file_func=open_file_func)
pieces.append(piece)
return pieces, partitions, common_metadata_path, metadata_path
def _is_local_file_system(fs):
return isinstance(fs, LocalFileSystem) or isinstance(
fs, legacyfs.LocalFileSystem
)
class _ParquetDatasetV2:
"""
ParquetDataset shim using the Dataset API under the hood.
"""
def __init__(self, path_or_paths, filesystem=None, filters=None,
partitioning="hive", read_dictionary=None, buffer_size=None,
memory_map=False, ignore_prefixes=None, pre_buffer=True,
coerce_int96_timestamp_unit=None, **kwargs):
import pyarrow.dataset as ds
# Raise error for not supported keywords
for keyword, default in [
("schema", None), ("metadata", None),
("split_row_groups", False), ("validate_schema", True),
("metadata_nthreads", 1)]:
if keyword in kwargs and kwargs[keyword] is not default:
raise ValueError(
"Keyword '{0}' is not yet supported with the new "
"Dataset API".format(keyword))
# map format arguments
read_options = {
"pre_buffer": pre_buffer,
"coerce_int96_timestamp_unit": coerce_int96_timestamp_unit
}
if buffer_size:
read_options.update(use_buffered_stream=True,
buffer_size=buffer_size)
if read_dictionary is not None:
read_options.update(dictionary_columns=read_dictionary)
# map filters to Expressions
self._filters = filters
self._filter_expression = filters and _filters_to_expression(filters)
# map old filesystems to new one
if filesystem is not None:
filesystem = _ensure_filesystem(
filesystem, use_mmap=memory_map)
elif filesystem is None and memory_map:
# if memory_map is specified, assume local file system (string
# path can in principle be URI for any filesystem)
filesystem = LocalFileSystem(use_mmap=memory_map)
# This needs to be checked after _ensure_filesystem, because that
# handles the case of an fsspec LocalFileSystem
if (
hasattr(path_or_paths, "__fspath__") and
filesystem is not None and
not _is_local_file_system(filesystem)
):
raise TypeError(
"Path-like objects with __fspath__ must only be used with "
f"local file systems, not {type(filesystem)}"
)
# check for single fragment dataset
single_file = None
if isinstance(path_or_paths, list):
if len(path_or_paths) == 1:
single_file = path_or_paths[0]
else:
if _is_path_like(path_or_paths):
path_or_paths = _stringify_path(path_or_paths)
if filesystem is None:
# path might be a URI describing the FileSystem as well
try:
filesystem, path_or_paths = FileSystem.from_uri(
path_or_paths)
except ValueError:
filesystem = LocalFileSystem(use_mmap=memory_map)
if filesystem.get_file_info(path_or_paths).is_file:
single_file = path_or_paths
else:
single_file = path_or_paths
if single_file is not None:
self._enable_parallel_column_conversion = True
read_options.update(enable_parallel_column_conversion=True)
parquet_format = ds.ParquetFileFormat(**read_options)
fragment = parquet_format.make_fragment(single_file, filesystem)
self._dataset = ds.FileSystemDataset(
[fragment], schema=fragment.physical_schema,
format=parquet_format,
filesystem=fragment.filesystem
)
return
else:
self._enable_parallel_column_conversion = False
parquet_format = ds.ParquetFileFormat(**read_options)
# check partitioning to enable dictionary encoding
if partitioning == "hive":
partitioning = ds.HivePartitioning.discover(
infer_dictionary=True)
self._dataset = ds.dataset(path_or_paths, filesystem=filesystem,
format=parquet_format,
partitioning=partitioning,
ignore_prefixes=ignore_prefixes)
@property
def schema(self):
return self._dataset.schema
def read(self, columns=None, use_threads=True, use_pandas_metadata=False):
"""
Read (multiple) Parquet files as a single pyarrow.Table.
Parameters
----------
columns : List[str]
Names of columns to read from the dataset. The partition fields
are not automatically included (in contrast to when setting
``use_legacy_dataset=True``).
use_threads : bool, default True
Perform multi-threaded column reads.
use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.
Returns
-------
pyarrow.Table
Content of the file as a table (of columns).
"""
# if use_pandas_metadata, we need to include index columns in the
# column selection, to be able to restore those in the pandas DataFrame
metadata = self.schema.metadata
if columns is not None and use_pandas_metadata:
if metadata and b'pandas' in metadata:
# RangeIndex can be represented as dict instead of column name
index_columns = [
col for col in _get_pandas_index_columns(metadata)
if not isinstance(col, dict)
]
columns = (
list(columns) + list(set(index_columns) - set(columns))
)
if self._enable_parallel_column_conversion:
if use_threads:
# Allow per-column parallelism; would otherwise cause
# contention in the presence of per-file parallelism.
use_threads = False
table = self._dataset.to_table(
columns=columns, filter=self._filter_expression,
use_threads=use_threads
)
# if use_pandas_metadata, restore the pandas metadata (which gets
# lost if doing a specific `columns` selection in to_table)
if use_pandas_metadata:
if metadata and b"pandas" in metadata:
new_metadata = table.schema.metadata or {}
new_metadata.update({b"pandas": metadata[b"pandas"]})
table = table.replace_schema_metadata(new_metadata)
return table
def read_pandas(self, **kwargs):
"""
Read dataset including pandas metadata, if any. Other arguments passed
through to ParquetDataset.read, see docstring for further details.
"""
return self.read(use_pandas_metadata=True, **kwargs)
@property
def pieces(self):
warnings.warn(
_DEPR_MSG.format("ParquetDataset.pieces",
" Use the '.fragments' attribute instead"),
DeprecationWarning, stacklevel=2)
return list(self._dataset.get_fragments())
@property
def fragments(self):
return list(self._dataset.get_fragments())
@property
def files(self):
return self._dataset.files
@property
def filesystem(self):
return self._dataset.filesystem
@property
def partitioning(self):
"""
The partitioning of the Dataset source, if discovered.
"""
return self._dataset.partitioning
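# Illustrative usage sketch (not part of pyarrow): the same read performed
# through the Dataset-API shim above, as returned by
# ParquetDataset(..., use_legacy_dataset=False). Paths, columns and filter
# values are hypothetical.
def _example_dataset_v2_read():
    dataset = ParquetDataset('my_dataset/', use_legacy_dataset=False,
                             filters=[('year', '=', 2020)])
    return dataset.read(columns=['value'])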
_read_table_docstring = """
{0}
Parameters
----------
source : str, pyarrow.NativeFile, or file-like object
    If a string is passed, it can be a single file name or directory name. For
    file-like objects, only a single file is read. Use pyarrow.BufferReader to
    read a file contained in a bytes or buffer-like object.
columns : list
If not None, only these columns will be read from the file. A column
name may be a prefix of a nested field, e.g. 'a' will select 'a.b',
'a.c', and 'a.d.e'. If empty, no columns will be read. Note
that the table will still have the correct num_rows set despite having
no columns.
use_threads : bool, default True
Perform multi-threaded column reads.
metadata : FileMetaData
If separately computed
{1}
use_legacy_dataset : bool, default False
By default, `read_table` uses the new Arrow Datasets API since
    pyarrow 1.0.0. Among other things, this allows passing `filters`
    for all columns and not only the partition keys, and enables
    different partitioning schemes, etc.
Set to True to use the legacy behaviour.
ignore_prefixes : list, optional
Files matching any of these prefixes will be ignored by the
discovery process if use_legacy_dataset=False.
This is matched to the basename of a path.
By default this is ['.', '_'].
Note that discovery happens only if a directory is passed as source.
filesystem : FileSystem, default None
If nothing passed, paths assumed to be found in the local on-disk
filesystem.
filters : List[Tuple] or List[List[Tuple]] or None (default)
Rows which do not match the filter predicate will be removed from scanned
data. Partition keys embedded in a nested directory structure will be
exploited to avoid loading files at all if they contain no matching rows.
If `use_legacy_dataset` is True, filters can only reference partition
keys and only a hive-style directory structure is supported. When
setting `use_legacy_dataset` to False, also within-file level filtering
and different partitioning schemes are supported.
{3}
pre_buffer : bool, default True
Coalesce and issue file reads in parallel to improve performance on
high-latency filesystems (e.g. S3). If True, Arrow will use a
background I/O thread pool. This option is only supported for
use_legacy_dataset=False. If using a filesystem layer that itself
performs readahead (e.g. fsspec's S3FS), disable readahead for best
results.
coerce_int96_timestamp_unit : str, default None.
Cast timestamps that are stored in INT96 format to a particular
resolution (e.g. 'ms'). Setting to None is equivalent to 'ns'
    and therefore INT96 timestamps will be inferred as timestamps
in nanoseconds.
Returns
-------
{2}
"""
def read_table(source, columns=None, use_threads=True, metadata=None,
use_pandas_metadata=False, memory_map=False,
read_dictionary=None, filesystem=None, filters=None,
buffer_size=0, partitioning="hive", use_legacy_dataset=False,
ignore_prefixes=None, pre_buffer=True,
coerce_int96_timestamp_unit=None):
if not use_legacy_dataset:
if metadata is not None:
raise ValueError(
"The 'metadata' keyword is no longer supported with the new "
"datasets-based implementation. Specify "
"'use_legacy_dataset=True' to temporarily recover the old "
"behaviour."
)
try:
dataset = _ParquetDatasetV2(
source,
filesystem=filesystem,
partitioning=partitioning,
memory_map=memory_map,
read_dictionary=read_dictionary,
buffer_size=buffer_size,
filters=filters,
ignore_prefixes=ignore_prefixes,
pre_buffer=pre_buffer,
coerce_int96_timestamp_unit=coerce_int96_timestamp_unit
)
except ImportError:
# fall back on ParquetFile for simple cases when pyarrow.dataset
# module is not available
if filters is not None:
raise ValueError(
"the 'filters' keyword is not supported when the "
"pyarrow.dataset module is not available"
)
if partitioning != "hive":
raise ValueError(
"the 'partitioning' keyword is not supported when the "
"pyarrow.dataset module is not available"
)
filesystem, path = _resolve_filesystem_and_path(source, filesystem)
if filesystem is not None:
source = filesystem.open_input_file(path)
# TODO test that source is not a directory or a list
dataset = ParquetFile(
source, metadata=metadata, read_dictionary=read_dictionary,
memory_map=memory_map, buffer_size=buffer_size,
pre_buffer=pre_buffer,
coerce_int96_timestamp_unit=coerce_int96_timestamp_unit
)
return dataset.read(columns=columns, use_threads=use_threads,
use_pandas_metadata=use_pandas_metadata)
if ignore_prefixes is not None:
raise ValueError(
"The 'ignore_prefixes' keyword is only supported when "
"use_legacy_dataset=False")
if _is_path_like(source):
pf = ParquetDataset(
source, metadata=metadata, memory_map=memory_map,
read_dictionary=read_dictionary,
buffer_size=buffer_size,
filesystem=filesystem, filters=filters,
partitioning=partitioning,
coerce_int96_timestamp_unit=coerce_int96_timestamp_unit
)
else:
pf = ParquetFile(
source, metadata=metadata,
read_dictionary=read_dictionary,
memory_map=memory_map,
buffer_size=buffer_size,
coerce_int96_timestamp_unit=coerce_int96_timestamp_unit
)
return pf.read(columns=columns, use_threads=use_threads,
use_pandas_metadata=use_pandas_metadata)
read_table.__doc__ = _read_table_docstring.format(
"""Read a Table from Parquet format
Note: starting with pyarrow 1.0, the default for `use_legacy_dataset` is
switched to False.""",
"\n".join((_read_docstring_common,
"""use_pandas_metadata : bool, default False
If True and file has custom pandas schema metadata, ensure that
index columns are also loaded.""")),
"""pyarrow.Table
Content of the file as a table (of columns)""",
_DNF_filter_doc)
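# Illustrative usage sketch (not part of pyarrow): a typical read_table call
# against the datasets-based code path. The path, column name and filter
# values are hypothetical.
def _example_read_table():
    table = read_table(
        'my_dataset/',                   # single file or partitioned directory
        columns=['value'],               # only materialize the needed columns
        filters=[('year', '=', 2020)],   # row/partition filtering in DNF form
    )
    return table.to_pandas()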
def read_pandas(source, columns=None, **kwargs):
return read_table(
source, columns=columns, use_pandas_metadata=True, **kwargs
)
read_pandas.__doc__ = _read_table_docstring.format(
'Read a Table from Parquet format, also reading DataFrame\n'
'index values if known in the file metadata',
"\n".join((_read_docstring_common,
"""**kwargs
additional options for :func:`read_table`""")),
"""pyarrow.Table
Content of the file as a Table of Columns, including DataFrame
indexes as columns""",
_DNF_filter_doc)
def write_table(table, where, row_group_size=None, version='1.0',
use_dictionary=True, compression='snappy',
write_statistics=True,
use_deprecated_int96_timestamps=None,
coerce_timestamps=None,
allow_truncated_timestamps=False,
data_page_size=None, flavor=None,
filesystem=None,
compression_level=None,
use_byte_stream_split=False,
column_encoding=None,
data_page_version='1.0',
use_compliant_nested_type=False,
**kwargs):
row_group_size = kwargs.pop('chunk_size', row_group_size)
use_int96 = use_deprecated_int96_timestamps
try:
with ParquetWriter(
where, table.schema,
filesystem=filesystem,
version=version,
flavor=flavor,
use_dictionary=use_dictionary,
write_statistics=write_statistics,
coerce_timestamps=coerce_timestamps,
data_page_size=data_page_size,
allow_truncated_timestamps=allow_truncated_timestamps,
compression=compression,
use_deprecated_int96_timestamps=use_int96,
compression_level=compression_level,
use_byte_stream_split=use_byte_stream_split,
column_encoding=column_encoding,
data_page_version=data_page_version,
use_compliant_nested_type=use_compliant_nested_type,
**kwargs) as writer:
writer.write_table(table, row_group_size=row_group_size)
except Exception:
if _is_path_like(where):
try:
os.remove(_stringify_path(where))
except os.error:
pass
raise
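# Illustrative usage sketch (not part of pyarrow): writing a small table with
# an explicit row group size and compression codec, using the keyword
# arguments documented for write_table. Column names and the output path are
# hypothetical.
def _example_write_table():
    table = pa.table({'id': [1, 2, 3, 4], 'value': [0.1, 0.2, 0.3, 0.4]})
    write_table(table, 'example.parquet',
                row_group_size=2,        # two rows per row group
                compression='snappy')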
write_table.__doc__ = """
Write a Table to Parquet format.
Parameters
----------
table : pyarrow.Table
where : string or pyarrow.NativeFile
row_group_size : int
Maximum size of each written row group. If None, the
row group size will be the minimum of the Table size
and 64 * 1024 * 1024.
{}
**kwargs : optional
Additional options for ParquetWriter
""".format(_parquet_writer_arg_docs)
def _mkdir_if_not_exists(fs, path):
if fs._isfilestore() and not fs.exists(path):
try:
fs.mkdir(path)
except OSError:
assert fs.exists(path)
def write_to_dataset(table, root_path, partition_cols=None,
partition_filename_cb=None, filesystem=None,
use_legacy_dataset=None, **kwargs):
"""Wrapper around parquet.write_table for writing a Table to
Parquet format by partitions.
For each combination of partition columns and values,
    subdirectories are created in the following
manner:
root_dir/
group1=value1
group2=value1
<uuid>.parquet
group2=value2
<uuid>.parquet
group1=valueN
group2=value1
<uuid>.parquet
group2=valueN
<uuid>.parquet
Parameters
----------
table : pyarrow.Table
root_path : str, pathlib.Path
The root directory of the dataset
filesystem : FileSystem, default None
If nothing passed, paths assumed to be found in the local on-disk
filesystem
partition_cols : list,
Column names by which to partition the dataset
Columns are partitioned in the order they are given
partition_filename_cb : callable,
A callback function that takes the partition key(s) as an argument
        and allows you to override the partition filename. If nothing is
passed, the filename will consist of a uuid.
use_legacy_dataset : bool
Default is True unless a ``pyarrow.fs`` filesystem is passed.
Set to False to enable the new code path (experimental, using the
new Arrow Dataset API). This is more efficient when using partition
columns, but does not (yet) support `partition_filename_cb` and
`metadata_collector` keywords.
**kwargs : dict,
Additional kwargs for write_table function. See docstring for
`write_table` or `ParquetWriter` for more information.
Using `metadata_collector` in kwargs allows one to collect the
file metadata instances of dataset pieces. The file paths in the
ColumnChunkMetaData will be set relative to `root_path`.
"""
if use_legacy_dataset is None:
# if a new filesystem is passed -> default to new implementation
if isinstance(filesystem, FileSystem):
use_legacy_dataset = False
# otherwise the default is still True
else:
use_legacy_dataset = True
if not use_legacy_dataset:
import pyarrow.dataset as ds
# extract non-file format options
schema = kwargs.pop("schema", None)
use_threads = kwargs.pop("use_threads", True)
# raise for unsupported keywords
msg = (
"The '{}' argument is not supported with the new dataset "
"implementation."
)
metadata_collector = kwargs.pop('metadata_collector', None)
file_visitor = None
if metadata_collector is not None:
def file_visitor(written_file):
metadata_collector.append(written_file.metadata)
if partition_filename_cb is not None:
raise ValueError(msg.format("partition_filename_cb"))
# map format arguments
parquet_format = ds.ParquetFileFormat()
write_options = parquet_format.make_write_options(**kwargs)
# map old filesystems to new one
if filesystem is not None:
filesystem = _ensure_filesystem(filesystem)
partitioning = None
if partition_cols:
part_schema = table.select(partition_cols).schema
partitioning = ds.partitioning(part_schema, flavor="hive")
ds.write_dataset(
table, root_path, filesystem=filesystem,
format=parquet_format, file_options=write_options, schema=schema,
partitioning=partitioning, use_threads=use_threads,
file_visitor=file_visitor)
return
fs, root_path = legacyfs.resolve_filesystem_and_path(root_path, filesystem)
_mkdir_if_not_exists(fs, root_path)
metadata_collector = kwargs.pop('metadata_collector', None)
if partition_cols is not None and len(partition_cols) > 0:
df = table.to_pandas()
partition_keys = [df[col] for col in partition_cols]
data_df = df.drop(partition_cols, axis='columns')
data_cols = df.columns.drop(partition_cols)
if len(data_cols) == 0:
raise ValueError('No data left to save outside partition columns')
subschema = table.schema
# ARROW-2891: Ensure the output_schema is preserved when writing a
# partitioned dataset
for col in table.schema.names:
if col in partition_cols:
subschema = subschema.remove(subschema.get_field_index(col))
for keys, subgroup in data_df.groupby(partition_keys):
if not isinstance(keys, tuple):
keys = (keys,)
subdir = '/'.join(
['{colname}={value}'.format(colname=name, value=val)
for name, val in zip(partition_cols, keys)])
subtable = pa.Table.from_pandas(subgroup, schema=subschema,
safe=False)
_mkdir_if_not_exists(fs, '/'.join([root_path, subdir]))
if partition_filename_cb:
outfile = partition_filename_cb(keys)
else:
outfile = guid() + '.parquet'
relative_path = '/'.join([subdir, outfile])
full_path = '/'.join([root_path, relative_path])
with fs.open(full_path, 'wb') as f:
write_table(subtable, f, metadata_collector=metadata_collector,
**kwargs)
if metadata_collector is not None:
metadata_collector[-1].set_file_path(relative_path)
else:
if partition_filename_cb:
outfile = partition_filename_cb(None)
else:
outfile = guid() + '.parquet'
full_path = '/'.join([root_path, outfile])
with fs.open(full_path, 'wb') as f:
write_table(table, f, metadata_collector=metadata_collector,
**kwargs)
if metadata_collector is not None:
metadata_collector[-1].set_file_path(outfile)
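# Illustrative usage sketch (not part of pyarrow): a partitioned write that
# produces the root_dir/group1=.../group2=... layout described in the
# write_to_dataset docstring. The table contents and partition column names
# are hypothetical.
def _example_write_to_dataset():
    table = pa.table({
        'year': [2020, 2020, 2021],
        'month': [11, 12, 1],
        'value': [1.0, 2.0, 3.0],
    })
    write_to_dataset(table, root_path='my_dataset',
                     partition_cols=['year', 'month'])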
def write_metadata(schema, where, metadata_collector=None, **kwargs):
"""
Write metadata-only Parquet file from schema. This can be used with
`write_to_dataset` to generate `_common_metadata` and `_metadata` sidecar
files.
Parameters
----------
schema : pyarrow.Schema
where : string or pyarrow.NativeFile
metadata_collector : list
where to collect metadata information.
**kwargs : dict,
Additional kwargs for ParquetWriter class. See docstring for
`ParquetWriter` for more information.
Examples
--------
Write a dataset and collect metadata information.
>>> metadata_collector = []
>>> write_to_dataset(
... table, root_path,
... metadata_collector=metadata_collector, **writer_kwargs)
Write the `_common_metadata` parquet file without row groups statistics.
>>> write_metadata(
... table.schema, root_path / '_common_metadata', **writer_kwargs)
Write the `_metadata` parquet file with row groups statistics.
>>> write_metadata(
... table.schema, root_path / '_metadata',
... metadata_collector=metadata_collector, **writer_kwargs)
"""
writer = ParquetWriter(where, schema, **kwargs)
writer.close()
if metadata_collector is not None:
# ParquetWriter doesn't expose the metadata until it's written. Write
# it and read it again.
metadata = read_metadata(where)
for m in metadata_collector:
metadata.append_row_groups(m)
metadata.write_metadata_file(where)
def read_metadata(where, memory_map=False):
"""
Read FileMetadata from footer of a single Parquet file.
Parameters
----------
where : str (file path) or file-like object
memory_map : bool, default False
Create memory map when the source is a file path.
Returns
-------
metadata : FileMetadata
"""
return ParquetFile(where, memory_map=memory_map).metadata
def read_schema(where, memory_map=False):
"""
Read effective Arrow schema from Parquet file metadata.
Parameters
----------
where : str (file path) or file-like object
memory_map : bool, default False
Create memory map when the source is a file path.
Returns
-------
schema : pyarrow.Schema
"""
return ParquetFile(where, memory_map=memory_map).schema.to_arrow_schema()
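# Illustrative usage sketch (not part of pyarrow): inspecting a file's footer
# metadata and Arrow schema without reading any data. 'example.parquet' is a
# hypothetical path.
def _example_inspect_parquet_file():
    meta = read_metadata('example.parquet')
    schema = read_schema('example.parquet')
    return meta.num_rows, meta.num_row_groups, schema.names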
| avg_line_length: 37.766102 | max_line_length: 84 | alphanum_fraction: 0.621802 |
|
| hexsha: 4a192a443d13fda45694dd9139d3e10fac04331a | size: 14859 | ext: py | lang: Python |
| max_stars_repo_path: Lib/site-packages/matplotlib/textpath.py | max_stars_repo_name: edupyter/EDUPYTER38 | max_stars_repo_head_hexsha: 396183cea72987506f1ef647c0272a2577c56218 | max_stars_repo_licenses: ["bzip2-1.0.6"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null |
| max_issues_repo_path: Lib/site-packages/matplotlib/textpath.py | max_issues_repo_name: edupyter/EDUPYTER38 | max_issues_repo_head_hexsha: 396183cea72987506f1ef647c0272a2577c56218 | max_issues_repo_licenses: ["bzip2-1.0.6"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: Lib/site-packages/matplotlib/textpath.py | max_forks_repo_name: edupyter/EDUPYTER38 | max_forks_repo_head_hexsha: 396183cea72987506f1ef647c0272a2577c56218 | max_forks_repo_licenses: ["bzip2-1.0.6"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
from collections import OrderedDict
import functools
import logging
import urllib.parse
import numpy as np
from matplotlib import _text_helpers, dviread, font_manager
from matplotlib.font_manager import FontProperties, get_font
from matplotlib.ft2font import LOAD_NO_HINTING, LOAD_TARGET_LIGHT
from matplotlib.mathtext import MathTextParser
from matplotlib.path import Path
from matplotlib.transforms import Affine2D
_log = logging.getLogger(__name__)
class TextToPath:
"""A class that converts strings to paths."""
FONT_SCALE = 100.
DPI = 72
def __init__(self):
self.mathtext_parser = MathTextParser('path')
self._texmanager = None
def _get_font(self, prop):
"""
Find the `FT2Font` matching font properties *prop*, with its size set.
"""
fname = font_manager.findfont(prop)
font = get_font(fname)
font.set_size(self.FONT_SCALE, self.DPI)
return font
def _get_hinting_flag(self):
return LOAD_NO_HINTING
def _get_char_id(self, font, ccode):
"""
Return a unique id for the given font and character-code set.
"""
return urllib.parse.quote(f"{font.postscript_name}-{ccode:x}")
def get_text_width_height_descent(self, s, prop, ismath):
if ismath == "TeX":
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=None)
return w, h, d
fontsize = prop.get_size_in_points()
scale = fontsize / self.FONT_SCALE
if ismath:
prop = prop.copy()
prop.set_size(self.FONT_SCALE)
width, height, descent, *_ = \
self.mathtext_parser.parse(s, 72, prop)
return width * scale, height * scale, descent * scale
font = self._get_font(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
return w * scale, h * scale, d * scale
def get_text_path(self, prop, s, ismath=False):
"""
Convert text *s* to path (a tuple of vertices and codes for
matplotlib.path.Path).
Parameters
----------
prop : `~matplotlib.font_manager.FontProperties`
The font properties for the text.
s : str
The text to be converted.
ismath : {False, True, "TeX"}
If True, use mathtext parser. If "TeX", use tex for rendering.
Returns
-------
verts : list
A list of numpy arrays containing the x and y coordinates of the
vertices.
codes : list
A list of path codes.
Examples
--------
Create a list of vertices and codes from a text, and create a `.Path`
from those::
from matplotlib.path import Path
from matplotlib.textpath import TextToPath
from matplotlib.font_manager import FontProperties
fp = FontProperties(family="Humor Sans", style="italic")
verts, codes = TextToPath().get_text_path(fp, "ABC")
path = Path(verts, codes, closed=False)
Also see `TextPath` for a more direct way to create a path from a text.
"""
if ismath == "TeX":
glyph_info, glyph_map, rects = self.get_glyphs_tex(prop, s)
elif not ismath:
font = self._get_font(prop)
glyph_info, glyph_map, rects = self.get_glyphs_with_font(font, s)
else:
glyph_info, glyph_map, rects = self.get_glyphs_mathtext(prop, s)
verts, codes = [], []
for glyph_id, xposition, yposition, scale in glyph_info:
verts1, codes1 = glyph_map[glyph_id]
if len(verts1):
verts1 = np.array(verts1) * scale + [xposition, yposition]
verts.extend(verts1)
codes.extend(codes1)
for verts1, codes1 in rects:
verts.extend(verts1)
codes.extend(codes1)
# Make sure an empty string or one with nothing to print
# (e.g. only spaces & newlines) will be valid/empty path
if not verts:
verts = np.empty((0, 2))
return verts, codes
def get_glyphs_with_font(self, font, s, glyph_map=None,
return_new_glyphs_only=False):
"""
Convert string *s* to vertices and codes using the provided ttf font.
"""
if glyph_map is None:
glyph_map = OrderedDict()
if return_new_glyphs_only:
glyph_map_new = OrderedDict()
else:
glyph_map_new = glyph_map
xpositions = []
glyph_ids = []
for item in _text_helpers.layout(s, font):
char_id = self._get_char_id(font, ord(item.char))
glyph_ids.append(char_id)
xpositions.append(item.x)
if char_id not in glyph_map:
glyph_map_new[char_id] = font.get_path()
ypositions = [0] * len(xpositions)
sizes = [1.] * len(xpositions)
rects = []
return (list(zip(glyph_ids, xpositions, ypositions, sizes)),
glyph_map_new, rects)
def get_glyphs_mathtext(self, prop, s, glyph_map=None,
return_new_glyphs_only=False):
"""
Parse mathtext string *s* and convert it to a (vertices, codes) pair.
"""
prop = prop.copy()
prop.set_size(self.FONT_SCALE)
width, height, descent, glyphs, rects = self.mathtext_parser.parse(
s, self.DPI, prop)
if not glyph_map:
glyph_map = OrderedDict()
if return_new_glyphs_only:
glyph_map_new = OrderedDict()
else:
glyph_map_new = glyph_map
xpositions = []
ypositions = []
glyph_ids = []
sizes = []
for font, fontsize, ccode, ox, oy in glyphs:
char_id = self._get_char_id(font, ccode)
if char_id not in glyph_map:
font.clear()
font.set_size(self.FONT_SCALE, self.DPI)
font.load_char(ccode, flags=LOAD_NO_HINTING)
glyph_map_new[char_id] = font.get_path()
xpositions.append(ox)
ypositions.append(oy)
glyph_ids.append(char_id)
size = fontsize / self.FONT_SCALE
sizes.append(size)
myrects = []
for ox, oy, w, h in rects:
vert1 = [(ox, oy), (ox, oy + h), (ox + w, oy + h),
(ox + w, oy), (ox, oy), (0, 0)]
code1 = [Path.MOVETO,
Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
myrects.append((vert1, code1))
return (list(zip(glyph_ids, xpositions, ypositions, sizes)),
glyph_map_new, myrects)
def get_texmanager(self):
"""Return the cached `~.texmanager.TexManager` instance."""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def get_glyphs_tex(self, prop, s, glyph_map=None,
return_new_glyphs_only=False):
"""Convert the string *s* to vertices and codes using usetex mode."""
# Mostly borrowed from pdf backend.
dvifile = self.get_texmanager().make_dvi(s, self.FONT_SCALE)
with dviread.Dvi(dvifile, self.DPI) as dvi:
page, = dvi
if glyph_map is None:
glyph_map = OrderedDict()
if return_new_glyphs_only:
glyph_map_new = OrderedDict()
else:
glyph_map_new = glyph_map
glyph_ids, xpositions, ypositions, sizes = [], [], [], []
# Gather font information and do some setup for combining
# characters into strings.
for x1, y1, dvifont, glyph, width in page.text:
font, enc = self._get_ps_font_and_encoding(dvifont.texname)
char_id = self._get_char_id(font, glyph)
if char_id not in glyph_map:
font.clear()
font.set_size(self.FONT_SCALE, self.DPI)
# See comments in _get_ps_font_and_encoding.
if enc is not None:
index = font.get_name_index(enc[glyph])
font.load_glyph(index, flags=LOAD_TARGET_LIGHT)
else:
font.load_char(glyph, flags=LOAD_TARGET_LIGHT)
glyph_map_new[char_id] = font.get_path()
glyph_ids.append(char_id)
xpositions.append(x1)
ypositions.append(y1)
sizes.append(dvifont.size / self.FONT_SCALE)
myrects = []
for ox, oy, h, w in page.boxes:
vert1 = [(ox, oy), (ox + w, oy), (ox + w, oy + h),
(ox, oy + h), (ox, oy), (0, 0)]
code1 = [Path.MOVETO,
Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
myrects.append((vert1, code1))
return (list(zip(glyph_ids, xpositions, ypositions, sizes)),
glyph_map_new, myrects)
@staticmethod
@functools.lru_cache(50)
def _get_ps_font_and_encoding(texname):
tex_font_map = dviread.PsfontsMap(dviread.find_tex_file('pdftex.map'))
psfont = tex_font_map[texname]
if psfont.filename is None:
raise ValueError(
f"No usable font file found for {psfont.psname} ({texname}). "
f"The font may lack a Type-1 version.")
font = get_font(psfont.filename)
if psfont.encoding:
# If psfonts.map specifies an encoding, use it: it gives us a
# mapping of glyph indices to Adobe glyph names; use it to convert
# dvi indices to glyph names and use the FreeType-synthesized
# unicode charmap to convert glyph names to glyph indices (with
# FT_Get_Name_Index/get_name_index), and load the glyph using
# FT_Load_Glyph/load_glyph. (That charmap has a coverage at least
# as good as, and possibly better than, the native charmaps.)
enc = dviread._parse_enc(psfont.encoding)
else:
# If psfonts.map specifies no encoding, the indices directly
# map to the font's "native" charmap; so don't use the
# FreeType-synthesized charmap but the native ones (we can't
# directly identify it but it's typically an Adobe charmap), and
# directly load the dvi glyph indices using FT_Load_Char/load_char.
for charmap_code in [
1094992451, # ADOBE_CUSTOM.
1094995778, # ADOBE_STANDARD.
]:
try:
font.select_charmap(charmap_code)
except (ValueError, RuntimeError):
pass
else:
break
else:
_log.warning("No supported encoding in font (%s).",
psfont.filename)
enc = None
return font, enc
text_to_path = TextToPath()
class TextPath(Path):
"""
Create a path from the text.
"""
def __init__(self, xy, s, size=None, prop=None,
_interpolation_steps=1, usetex=False):
r"""
Create a path from the text. Note that it simply is a path,
not an artist. You need to use the `.PathPatch` (or other artists)
to draw this path onto the canvas.
Parameters
----------
xy : tuple or array of two float values
Position of the text. For no offset, use ``xy=(0, 0)``.
s : str
The text to convert to a path.
size : float, optional
Font size in points. Defaults to the size specified via the font
properties *prop*.
prop : `matplotlib.font_manager.FontProperties`, optional
Font property. If not provided, will use a default
``FontProperties`` with parameters from the
:ref:`rcParams<customizing-with-dynamic-rc-settings>`.
_interpolation_steps : int, optional
(Currently ignored)
usetex : bool, default: False
Whether to use tex rendering.
Examples
--------
The following creates a path from the string "ABC" with Helvetica
font face; and another path from the latex fraction 1/2::
from matplotlib.textpath import TextPath
from matplotlib.font_manager import FontProperties
fp = FontProperties(family="Helvetica", style="italic")
path1 = TextPath((12, 12), "ABC", size=12, prop=fp)
path2 = TextPath((0, 0), r"$\frac{1}{2}$", size=12, usetex=True)
Also see :doc:`/gallery/text_labels_and_annotations/demo_text_path`.
"""
# Circular import.
from matplotlib.text import Text
prop = FontProperties._from_any(prop)
if size is None:
size = prop.get_size_in_points()
self._xy = xy
self.set_size(size)
self._cached_vertices = None
s, ismath = Text(usetex=usetex)._preprocess_math(s)
super().__init__(
*text_to_path.get_text_path(prop, s, ismath=ismath),
_interpolation_steps=_interpolation_steps,
readonly=True)
self._should_simplify = False
def set_size(self, size):
"""Set the text size."""
self._size = size
self._invalid = True
def get_size(self):
"""Get the text size."""
return self._size
@property
def vertices(self):
"""
Return the cached path after updating it if necessary.
"""
self._revalidate_path()
return self._cached_vertices
@property
def codes(self):
"""
Return the codes
"""
return self._codes
def _revalidate_path(self):
"""
Update the path if necessary.
        The path for the text is initially created with the font size of
        `.FONT_SCALE`, and this path is rescaled to other sizes when necessary.
"""
if self._invalid or self._cached_vertices is None:
tr = (Affine2D()
.scale(self._size / text_to_path.FONT_SCALE)
.translate(*self._xy))
self._cached_vertices = tr.transform(self._vertices)
self._cached_vertices.flags.writeable = False
self._invalid = False
| avg_line_length: 34.002288 | max_line_length: 79 | alphanum_fraction: 0.574063 |
|
| hexsha: 4a192aed91e139345584bb3b14c7c7de858ad814 | size: 1435 | ext: py | lang: Python |
| max_stars_repo_path: modelr/rock_properties.py | max_stars_repo_name: kwinkunks/modelr | max_stars_repo_head_hexsha: 24f338941c3c2fb87e29c0433bc175e63dc21656 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 5 | max_stars_repo_stars_event_min_datetime: 2015-04-20T08:55:13.000Z | max_stars_repo_stars_event_max_datetime: 2019-08-13T17:51:43.000Z |
| max_issues_repo_path: modelr/rock_properties.py | max_issues_repo_name: kwinkunks/modelr | max_issues_repo_head_hexsha: 24f338941c3c2fb87e29c0433bc175e63dc21656 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2015-06-04T13:52:38.000Z | max_issues_repo_issues_event_max_datetime: 2016-05-04T19:56:19.000Z |
| max_forks_repo_path: modelr/rock_properties.py | max_forks_repo_name: kwinkunks/modelr | max_forks_repo_head_hexsha: 24f338941c3c2fb87e29c0433bc175e63dc21656 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 4 | max_forks_repo_forks_event_min_datetime: 2016-02-12T15:10:47.000Z | max_forks_repo_forks_event_max_datetime: 2020-12-23T00:46:29.000Z |
'''
=========================
modelr.rock_properties
=========================
Container for physical rock properties
'''
from bruges.rockphysics import moduli_dict as moduli
class RockProperties(object):
'''
Class to store rock properties.
:param vp: pressure wave velocity
:param vs: shear wave velocity
:param rho: bulk density
'''
def __init__(self, vp, vs=None, rho=None, vp_sig=0,
vs_sig=0, rho_sig=0, units='si'):
        # Deal with Imperial units (ft/s -> m/s for velocities,
        # g/cc -> kg/m^3 for density). Values that are still None are
        # skipped here and filled in below.
        if units != 'si':
            vp = vp * 0.30480
            if vs is not None:
                vs = vs * 0.30480
            if rho is not None:
                rho = rho * 1000.0
            vp_sig = vp_sig * 0.30480
            vs_sig = vs_sig * 0.30480
            rho_sig = rho_sig * 1000.0
        # Deal with missing values
        # Simple Vp/Vs ratio of 2 if no shear velocity is given
        if vs is None:
            vs = vp / 2.0
        # Gardner equation (Vp converted to ft/s) if no density is given
        if rho is None:
            rho = 1000 * 0.23 * (vp * 3.28084)**0.25
# Set properties
self.vp = vp
self.vs = vs
self.rho = rho
self.vp_sig = vp_sig
self.vs_sig = vs_sig
self.rho_sig = rho_sig
def __repr__(self):
return 'RockProperties(vp=%r, rho=%r, vs=%r)' % \
(self.vp, self.rho, self.vs)
def get_moduli(self):
return moduli(vp=self.vp, vs=self.vs, rho=self.rho)
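# Illustrative usage sketch (not part of modelr): building a rock from SI-unit
# values and asking for its elastic moduli. The numeric values are hypothetical.
def _example_rock():
    rock = RockProperties(vp=2350.0, vs=1125.0, rho=2400.0)  # m/s, m/s, kg/m^3
    return rock.get_moduli()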
| avg_line_length: 21.41791 | max_line_length: 59 | alphanum_fraction: 0.503136 |
|
| hexsha: 4a192b79ec1b5c17e4c6f29d444b8907b79555ff | size: 30562 | ext: py | lang: Python |
| max_stars_repo_path: client/commands/v2/tests/start_test.py | max_stars_repo_name: zhuang-93/pyre-check | max_stars_repo_head_hexsha: 460b4c7bd81b152f386d6867c963564e1b20f19c | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null |
| max_issues_repo_path: client/commands/v2/tests/start_test.py | max_issues_repo_name: zhuang-93/pyre-check | max_issues_repo_head_hexsha: 460b4c7bd81b152f386d6867c963564e1b20f19c | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null |
| max_forks_repo_path: client/commands/v2/tests/start_test.py | max_forks_repo_name: zhuang-93/pyre-check | max_forks_repo_head_hexsha: 460b4c7bd81b152f386d6867c963564e1b20f19c | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
from pathlib import Path
from typing import Iterable, Tuple
import testslide
from .... import command_arguments, configuration
from ....tests import setup
from ..start import (
Arguments,
CriticalFile,
LoadSavedStateFromFile,
LoadSavedStateFromProject,
StoreSavedStateToFile,
RemoteLogging,
MatchPolicy,
SimpleSourcePath,
BuckSourcePath,
create_server_arguments,
find_watchman_root,
find_buck_root,
get_critical_files,
get_saved_state_action,
get_server_identifier,
get_profiling_log_path,
get_source_path,
background_server_log_file,
ARTIFACT_ROOT_NAME,
)
class ArgumentTest(testslide.TestCase):
def test_serialize_critical_file(self) -> None:
self.assertDictEqual(
CriticalFile(policy=MatchPolicy.BASE_NAME, path="foo").serialize(),
{"base_name": "foo"},
)
self.assertDictEqual(
CriticalFile(policy=MatchPolicy.EXTENSION, path="foo").serialize(),
{"extension": "foo"},
)
self.assertDictEqual(
CriticalFile(policy=MatchPolicy.FULL_PATH, path="/foo/bar").serialize(),
{"full_path": "/foo/bar"},
)
def test_serialize_saved_state_action(self) -> None:
self.assertTupleEqual(
LoadSavedStateFromFile(shared_memory_path="/foo/bar").serialize(),
("load_from_file", {"shared_memory_path": "/foo/bar"}),
)
self.assertTupleEqual(
LoadSavedStateFromFile(
shared_memory_path="/foo/bar", changed_files_path="derp.txt"
).serialize(),
(
"load_from_file",
{"shared_memory_path": "/foo/bar", "changed_files_path": "derp.txt"},
),
)
self.assertTupleEqual(
LoadSavedStateFromProject(project_name="my_project").serialize(),
("load_from_project", {"project_name": "my_project"}),
)
self.assertTupleEqual(
LoadSavedStateFromProject(
project_name="my_project", project_metadata="my_metadata"
).serialize(),
(
"load_from_project",
{"project_name": "my_project", "project_metadata": "my_metadata"},
),
)
self.assertTupleEqual(
StoreSavedStateToFile(shared_memory_path="/foo/bar").serialize(),
("save_to_file", {"shared_memory_path": "/foo/bar"}),
)
def test_serialize_remote_logging(self) -> None:
self.assertDictEqual(
RemoteLogging(logger="/bin/logger").serialize(),
{"logger": "/bin/logger", "identifier": ""},
)
self.assertDictEqual(
RemoteLogging(logger="/bin/logger", identifier="foo").serialize(),
{"logger": "/bin/logger", "identifier": "foo"},
)
def test_serialize_source_paths(self) -> None:
self.assertDictEqual(
SimpleSourcePath(
[
configuration.SimpleSearchPathElement("/source0"),
configuration.SimpleSearchPathElement("/source1"),
]
).serialize(),
{"kind": "simple", "paths": ["/source0", "/source1"]},
)
self.assertDictEqual(
BuckSourcePath(
source_root=Path("/source"),
artifact_root=Path("/artifact"),
checked_directory=Path("/source"),
targets=["//foo:bar", "//foo:baz"],
).serialize(),
{
"kind": "buck",
"source_root": "/source",
"artifact_root": "/artifact",
"targets": ["//foo:bar", "//foo:baz"],
},
)
self.assertDictEqual(
BuckSourcePath(
source_root=Path("/source"),
artifact_root=Path("/artifact"),
checked_directory=Path("/source"),
targets=["//foo:bar"],
mode="opt",
isolation_prefix=".lsp",
).serialize(),
{
"kind": "buck",
"source_root": "/source",
"artifact_root": "/artifact",
"targets": ["//foo:bar"],
"mode": "opt",
"isolation_prefix": ".lsp",
},
)
def test_serialize_arguments(self) -> None:
def assert_serialized(
arguments: Arguments, items: Iterable[Tuple[str, object]]
) -> None:
serialized = arguments.serialize()
for key, value in items:
if key not in serialized:
self.fail(f"Cannot find key `{key}` in serialized arguments")
else:
self.assertEqual(value, serialized[key])
assert_serialized(
Arguments(
log_path="foo",
global_root="bar",
source_paths=SimpleSourcePath(
[configuration.SimpleSearchPathElement("source")]
),
),
[
("log_path", "foo"),
("global_root", "bar"),
("source_paths", {"kind": "simple", "paths": ["source"]}),
],
)
assert_serialized(
Arguments(
log_path="/log",
global_root="/project",
source_paths=SimpleSourcePath(),
excludes=["/excludes"],
checked_directory_allowlist=["/allows"],
checked_directory_blocklist=["/blocks"],
extensions=[".typsy"],
taint_models_path=["/taint/model"],
),
[
("excludes", ["/excludes"]),
("checked_directory_allowlist", ["/allows"]),
("checked_directory_blocklist", ["/blocks"]),
("extensions", [".typsy"]),
("taint_model_paths", ["/taint/model"]),
],
)
assert_serialized(
Arguments(
log_path="/log",
global_root="/project",
source_paths=SimpleSourcePath(),
debug=True,
strict=True,
show_error_traces=True,
store_type_check_resolution=True,
critical_files=[
CriticalFile(policy=MatchPolicy.BASE_NAME, path="foo.py"),
CriticalFile(policy=MatchPolicy.EXTENSION, path="txt"),
CriticalFile(policy=MatchPolicy.FULL_PATH, path="/home/bar.txt"),
],
),
[
("debug", True),
("strict", True),
("show_error_traces", True),
("store_type_check_resolution", True),
(
"critical_files",
[
{"base_name": "foo.py"},
{"extension": "txt"},
{"full_path": "/home/bar.txt"},
],
),
],
)
assert_serialized(
Arguments(
log_path="/log",
global_root="/project",
source_paths=SimpleSourcePath(),
parallel=True,
number_of_workers=20,
),
[("parallel", True), ("number_of_workers", 20)],
)
assert_serialized(
Arguments(
log_path="/log",
global_root="/project",
source_paths=SimpleSourcePath(),
relative_local_root="local",
watchman_root=Path("/project"),
),
[("local_root", "/project/local"), ("watchman_root", "/project")],
)
assert_serialized(
Arguments(
log_path="/log",
global_root="/project",
source_paths=SimpleSourcePath(),
saved_state_action=LoadSavedStateFromProject(
project_name="my_project", project_metadata="my_metadata"
),
),
[
(
"saved_state_action",
(
"load_from_project",
{
"project_name": "my_project",
"project_metadata": "my_metadata",
},
),
)
],
)
assert_serialized(
Arguments(
log_path="/log",
global_root="/project",
source_paths=SimpleSourcePath(),
additional_logging_sections=["foo", "bar"],
remote_logging=RemoteLogging(logger="/logger", identifier="baz"),
profiling_output=Path("/derp"),
memory_profiling_output=Path("/derp2"),
),
[
("additional_logging_sections", ["foo", "bar"]),
("profiling_output", "/derp"),
("remote_logging", {"logger": "/logger", "identifier": "baz"}),
("memory_profiling_output", "/derp2"),
],
)
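    # Read together, the serialization tests above pin down the flat wire format
    # that Arguments.serialize() produces for the server. An illustrative composite
    # (not an extra assertion) looks like:
    #
    #   {
    #       "log_path": "/log",
    #       "global_root": "/project",
    #       "source_paths": {"kind": "simple", "paths": ["source"]},
    #       "saved_state_action": ("load_from_project", {"project_name": "my_project"}),
    #       ...
    #   }
    #
    # where each nested value reuses the per-type serialize() output exercised in
    # test_serialize_critical_file, test_serialize_saved_state_action and
    # test_serialize_source_paths.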
class ServerIdentifierTest(testslide.TestCase):
def test_server_identifier(self) -> None:
def assert_server_identifier(
client_configuration: configuration.Configuration, expected: str
) -> None:
self.assertEqual(get_server_identifier(client_configuration), expected)
assert_server_identifier(
configuration.Configuration(
project_root="project", dot_pyre_directory=Path(".pyre")
),
"project",
)
assert_server_identifier(
configuration.Configuration(
project_root="my/project", dot_pyre_directory=Path(".pyre")
),
"project",
)
assert_server_identifier(
configuration.Configuration(
project_root="my/project",
dot_pyre_directory=Path(".pyre"),
relative_local_root="foo",
),
"project/foo",
)
assert_server_identifier(
configuration.Configuration(
project_root="my/project",
dot_pyre_directory=Path(".pyre"),
relative_local_root="foo/bar",
),
"project/foo/bar",
)
class StartTest(testslide.TestCase):
def test_get_critical_files(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre", "project/local"])
setup.write_configuration_file(
root_path, {"critical_files": ["foo", "bar/baz"]}
)
setup.write_configuration_file(
root_path, {"source_directories": ["."]}, relative="local"
)
setup.ensure_files_exist(root_path, ["foo", "bar/baz"])
self.assertCountEqual(
get_critical_files(
configuration.create_configuration(
command_arguments.CommandArguments(local_configuration="local"),
root_path,
)
),
[
CriticalFile(
MatchPolicy.FULL_PATH, str(root_path / ".pyre_configuration")
),
CriticalFile(
MatchPolicy.FULL_PATH,
str(root_path / "local/.pyre_configuration.local"),
),
CriticalFile(MatchPolicy.FULL_PATH, str(root_path / "foo")),
CriticalFile(MatchPolicy.FULL_PATH, str(root_path / "bar/baz")),
],
)
def test_get_saved_state_action(self) -> None:
self.assertIsNone(get_saved_state_action(command_arguments.StartArguments()))
self.assertEqual(
get_saved_state_action(
command_arguments.StartArguments(load_initial_state_from="foo")
),
LoadSavedStateFromFile(shared_memory_path="foo"),
)
self.assertEqual(
get_saved_state_action(
command_arguments.StartArguments(
load_initial_state_from="foo", changed_files_path="bar"
)
),
LoadSavedStateFromFile(shared_memory_path="foo", changed_files_path="bar"),
)
self.assertEqual(
get_saved_state_action(
command_arguments.StartArguments(saved_state_project="my_project")
),
LoadSavedStateFromProject(project_name="my_project"),
)
self.assertEqual(
get_saved_state_action(
command_arguments.StartArguments(saved_state_project="my_project"),
relative_local_root="local/root",
),
LoadSavedStateFromProject(
project_name="my_project", project_metadata="local$root"
),
)
self.assertEqual(
get_saved_state_action(
command_arguments.StartArguments(
load_initial_state_from="foo", changed_files_path="bar"
),
relative_local_root="local/root",
),
LoadSavedStateFromFile(shared_memory_path="foo", changed_files_path="bar"),
)
self.assertEqual(
get_saved_state_action(
command_arguments.StartArguments(save_initial_state_to="/foo")
),
StoreSavedStateToFile(shared_memory_path="/foo"),
)
def test_find_watchman_root(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_files_exist(
root_path,
["foo/qux/derp", "foo/bar/.watchmanconfig", "foo/bar/baz/derp"],
)
expected_root = root_path / "foo/bar"
self.assertEqual(
find_watchman_root(root_path / "foo/bar/baz"), expected_root
)
self.assertEqual(find_watchman_root(root_path / "foo/bar"), expected_root)
self.assertIsNone(find_watchman_root(root_path / "foo/qux"))
self.assertIsNone(find_watchman_root(root_path / "foo"))
self.assertIsNone(find_watchman_root(root_path))
def test_find_buck_root(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_files_exist(
root_path,
["foo/qux/derp", "foo/bar/.buckconfig", "foo/bar/baz/derp"],
)
expected_root = root_path / "foo/bar"
self.assertEqual(find_buck_root(root_path / "foo/bar/baz"), expected_root)
self.assertEqual(find_buck_root(root_path / "foo/bar"), expected_root)
self.assertIsNone(find_buck_root(root_path / "foo/qux"))
self.assertIsNone(find_buck_root(root_path / "foo"))
self.assertIsNone(find_buck_root(root_path))
def test_get_simple_source_path__exists(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre", "src"])
element = configuration.SimpleSearchPathElement(str(root_path / "src"))
self.assertEqual(
get_source_path(
configuration.Configuration(
project_root=str(root_path / "project"),
dot_pyre_directory=(root_path / ".pyre"),
source_directories=[element],
).filter_nonexistent_paths()
),
SimpleSourcePath([element]),
)
def test_get_simple_source_path__nonexists(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre"])
element = configuration.SimpleSearchPathElement(str(root_path / "src"))
self.assertEqual(
get_source_path(
configuration.Configuration(
project_root=str(root_path / "project"),
dot_pyre_directory=(root_path / ".pyre"),
source_directories=[element],
).filter_nonexistent_paths()
),
SimpleSourcePath([]),
)
def test_get_buck_source_path__global(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre", "buck_root"])
setup.ensure_files_exist(root_path, ["buck_root/.buckconfig"])
setup.write_configuration_file(
root_path / "buck_root",
{
"targets": ["//ct:marle", "//ct:lucca"],
"buck_mode": "opt",
"isolation_prefix": ".lsp",
},
)
self.assertEqual(
get_source_path(
configuration.create_configuration(
command_arguments.CommandArguments(
dot_pyre_directory=root_path / ".pyre",
),
root_path / "buck_root",
)
),
BuckSourcePath(
source_root=root_path / "buck_root",
artifact_root=root_path / ".pyre" / ARTIFACT_ROOT_NAME,
checked_directory=root_path / "buck_root",
targets=["//ct:marle", "//ct:lucca"],
mode="opt",
isolation_prefix=".lsp",
),
)
def test_get_buck_source_path__local(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre", "project/local"])
setup.ensure_files_exist(root_path, ["project/local/.buckconfig"])
setup.write_configuration_file(
root_path / "project",
{
"buck_mode": "opt",
"isolation_prefix": ".lsp",
},
)
setup.write_configuration_file(
root_path / "project",
{"targets": ["//ct:chrono"]},
relative="local",
)
self.assertEqual(
get_source_path(
configuration.create_configuration(
command_arguments.CommandArguments(
local_configuration="local",
dot_pyre_directory=root_path / ".pyre",
),
root_path / "project",
)
),
BuckSourcePath(
source_root=root_path / "project/local",
artifact_root=root_path / ".pyre" / ARTIFACT_ROOT_NAME / "local",
checked_directory=root_path / "project/local",
targets=["//ct:chrono"],
mode="opt",
isolation_prefix=".lsp",
),
)
def test_get_buck_source_path__no_buck_root(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre", "project"])
with self.assertRaises(configuration.InvalidConfiguration):
get_source_path(
configuration.Configuration(
project_root=str(root_path / "project"),
dot_pyre_directory=(root_path / ".pyre"),
targets=["//ct:frog"],
).filter_nonexistent_paths()
)
def test_get_source_path__no_source_specified(self) -> None:
with self.assertRaises(configuration.InvalidConfiguration):
get_source_path(
configuration.Configuration(
project_root="project",
dot_pyre_directory=Path(".pyre"),
source_directories=None,
targets=None,
).filter_nonexistent_paths()
)
def test_get_source_path__confliciting_source_specified(self) -> None:
with self.assertRaises(configuration.InvalidConfiguration):
get_source_path(
configuration.Configuration(
project_root="project",
dot_pyre_directory=Path(".pyre"),
source_directories=[configuration.SimpleSearchPathElement("src")],
targets=["//ct:ayla"],
).filter_nonexistent_paths()
)
def test_get_checked_directory_for_simple_source_path(self) -> None:
element0 = configuration.SimpleSearchPathElement("ozzie")
element1 = configuration.SubdirectorySearchPathElement("diva", "flea")
element2 = configuration.SitePackageSearchPathElement("super", "slash")
self.assertCountEqual(
SimpleSourcePath(
[element0, element1, element2, element0]
).get_checked_directory_allowlist(),
[element0.path(), element1.path(), element2.path()],
)
def test_get_checked_directory_for_buck_source_path(self) -> None:
self.assertCountEqual(
BuckSourcePath(
source_root=Path("/source"),
artifact_root=Path("/artifact"),
checked_directory=Path("/source/ct"),
targets=[
"//ct:robo",
"//ct:magus",
"future//ct/guardia/...",
"//ct/guardia:schala",
],
).get_checked_directory_allowlist(),
["/source/ct"],
)
def test_create_server_arguments(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(
root_path, [".pyre", "allows", "blocks", "search", "taint", "local/src"]
)
setup.ensure_files_exist(root_path, ["critical", ".watchmanconfig"])
setup.write_configuration_file(
root_path,
{
"do_not_ignore_errors_in": ["allows", "nonexistent"],
"ignore_all_errors": ["blocks", "nonexistent"],
"critical_files": ["critical"],
"exclude": ["exclude"],
"extensions": [".ext", "invalid_extension"],
"workers": 42,
"search_path": ["search", "nonexistent"],
"strict": True,
"taint_models_path": ["taint"],
},
)
setup.write_configuration_file(
root_path, {"source_directories": ["src"]}, relative="local"
)
server_configuration = configuration.create_configuration(
command_arguments.CommandArguments(
local_configuration="local",
dot_pyre_directory=root_path / ".pyre",
),
root_path,
)
self.assertEqual(
create_server_arguments(
server_configuration,
command_arguments.StartArguments(
debug=True,
no_watchman=False,
saved_state_project="project",
sequential=False,
show_error_traces=True,
store_type_check_resolution=True,
),
),
Arguments(
log_path=str(root_path / ".pyre/local"),
global_root=str(root_path),
additional_logging_sections=["server"],
checked_directory_allowlist=[
str(root_path / "local/src"),
str(root_path / "allows"),
],
checked_directory_blocklist=[str(root_path / "blocks")],
critical_files=[
CriticalFile(
MatchPolicy.FULL_PATH,
str(root_path / ".pyre_configuration"),
),
CriticalFile(
MatchPolicy.FULL_PATH,
str(root_path / "local/.pyre_configuration.local"),
),
CriticalFile(
MatchPolicy.FULL_PATH, str(root_path / "critical")
),
],
debug=True,
excludes=["exclude"],
extensions=[".ext"],
relative_local_root="local",
number_of_workers=42,
parallel=True,
python_version=server_configuration.get_python_version(),
saved_state_action=LoadSavedStateFromProject(
project_name="project", project_metadata="local"
),
search_paths=[
configuration.SimpleSearchPathElement(str(root_path / "search"))
],
show_error_traces=True,
source_paths=SimpleSourcePath(
[
configuration.SimpleSearchPathElement(
str(root_path / "local/src")
)
]
),
store_type_check_resolution=True,
strict=True,
taint_models_path=[str(root_path / "taint")],
watchman_root=root_path,
),
)
def test_create_server_arguments_watchman_not_found(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre", "src"])
setup.write_configuration_file(
root_path,
{"source_directories": ["src"]},
)
arguments = create_server_arguments(
configuration.create_configuration(
command_arguments.CommandArguments(
dot_pyre_directory=root_path / ".pyre",
),
root_path,
),
command_arguments.StartArguments(
no_watchman=False,
),
)
self.assertIsNone(arguments.watchman_root)
def test_create_server_arguments_disable_saved_state(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre", "src"])
setup.write_configuration_file(
root_path,
{"source_directories": ["src"]},
)
arguments = create_server_arguments(
configuration.create_configuration(
command_arguments.CommandArguments(
dot_pyre_directory=root_path / ".pyre",
),
root_path,
),
command_arguments.StartArguments(
no_saved_state=True, saved_state_project="some/project"
),
)
self.assertIsNone(arguments.saved_state_action)
def test_create_server_arguments_logging(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
log_path = root_path / ".pyre"
logger_path = root_path / "logger"
setup.ensure_directories_exists(root_path, [".pyre", "src"])
setup.ensure_files_exist(root_path, ["logger"])
setup.write_configuration_file(
root_path,
{"source_directories": ["src"], "logger": str(logger_path)},
)
arguments = create_server_arguments(
configuration.create_configuration(
command_arguments.CommandArguments(dot_pyre_directory=log_path),
root_path,
),
command_arguments.StartArguments(
logging_sections="foo,bar,-baz",
noninteractive=True,
enable_profiling=True,
enable_memory_profiling=True,
log_identifier="derp",
),
)
self.assertListEqual(
list(arguments.additional_logging_sections),
["foo", "bar", "-baz", "-progress", "server"],
)
self.assertEqual(
arguments.profiling_output, get_profiling_log_path(log_path)
)
self.assertEqual(
arguments.memory_profiling_output, get_profiling_log_path(log_path)
)
self.assertEqual(
arguments.remote_logging,
RemoteLogging(logger=str(logger_path), identifier="derp"),
)
def test_background_server_log_placement(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
with background_server_log_file(root_path) as log_file:
print("foo", file=log_file)
# Make sure that the log content can be read from a known location.
self.assertEqual(
(root_path / "new_server" / "server.stderr").read_text().strip(), "foo"
)
avg_line_length: 39.536869 | max_line_length: 88 | alphanum_fraction: 0.511158

hexsha: 4a192c97d8837d81f550bca1e1b97b8bcbda9b37 | size: 7242 | ext: py | lang: Python
max_stars_repo_path: bentoml/_internal/utils/tensorflow.py | max_stars_repo_name: joswlv/BentoML | max_stars_repo_head_hexsha: 3e4e66ebeaec76fb5a43f9897ebf3a60ed6718e4 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: bentoml/_internal/utils/tensorflow.py | max_issues_repo_name: joswlv/BentoML | max_issues_repo_head_hexsha: 3e4e66ebeaec76fb5a43f9897ebf3a60ed6718e4 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: bentoml/_internal/utils/tensorflow.py | max_forks_repo_name: joswlv/BentoML | max_forks_repo_head_hexsha: 3e4e66ebeaec76fb5a43f9897ebf3a60ed6718e4 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
import logging
from typing import TYPE_CHECKING, Sequence, TypeVar, Union
from bentoml.exceptions import MissingDependencyException
if TYPE_CHECKING:
# fmt: off
from tensorflow.python.framework.type_spec import ( # noqa # pylint: disable=unused-import
TypeSpec,
)
# fmt: on
logger = logging.getLogger(__name__)
TF_KERAS_DEFAULT_FUNCTIONS = {
"_default_save_signature",
"call_and_return_all_conditional_losses",
}
TENSOR_CLASS_NAMES = (
"RaggedTensor",
"SparseTensor",
"TensorArray",
"EagerTensor",
"Tensor",
)
ST = TypeVar("ST")
def _isinstance_wrapper(obj: ST, sobj: Union[str, type, Sequence]) -> bool:
"""
`isinstance` wrapper to check tensor spec
Args:
obj:
tensor class to check.
sobj:
            class used to check with :obj:`obj`. Follows `TENSOR_CLASS_NAMES`
Returns:
:obj:`bool`
"""
if not sobj:
return False
if isinstance(sobj, str):
return type(obj).__name__ == sobj.split(".")[-1]
if isinstance(sobj, (tuple, list, set)):
return any(_isinstance_wrapper(obj, k) for k in sobj)
return isinstance(obj, sobj)
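# A minimal illustration of the string-matching path (the classes below are
# hypothetical stand-ins, not TensorFlow types): only the last dotted component
# of a string `sobj` is compared against the runtime class name of `obj`.
#
# >>> class Tensor:
# ...     pass
# >>> _isinstance_wrapper(Tensor(), "ops.Tensor")
# True
# >>> _isinstance_wrapper(Tensor(), ("RaggedTensor", "SparseTensor"))
# False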
def normalize_spec(value: ST) -> "TypeSpec":
"""normalize tensor spec"""
if not _isinstance_wrapper(value, TENSOR_CLASS_NAMES):
return value
import tensorflow as tf
if _isinstance_wrapper(value, "RaggedTensor"):
return tf.RaggedTensorSpec.from_value(value)
if _isinstance_wrapper(value, "SparseTensor"):
return tf.SparseTensorSpec.from_value(value)
if _isinstance_wrapper(value, "TensorArray"):
return tf.TensorArraySpec.from_tensor(value)
if _isinstance_wrapper(value, ("Tensor", "EagerTensor")):
return tf.TensorSpec.from_tensor(value)
return value
def get_input_signatures(func):
if hasattr(func, "function_spec"): # for RestoredFunction
if func.function_spec.input_signature:
return ((func.function_spec.input_signature, {}),)
else:
return tuple(
s
for conc in func.concrete_functions
for s in get_input_signatures(conc)
)
if hasattr(func, "structured_input_signature"): # for ConcreteFunction
if func.structured_input_signature is not None:
return (func.structured_input_signature,)
if func._arg_keywords is not None: # TODO(bojiang): using private API
return (
(
tuple(),
{
k: normalize_spec(v)
for k, v in zip(func._arg_keywords, func.inputs)
},
),
)
return tuple()
def get_arg_names(func):
if hasattr(func, "function_spec"): # for RestoredFunction
return func.function_spec.arg_names
if hasattr(func, "structured_input_signature"): # for ConcreteFunction
return func._arg_keywords
return tuple()
def get_output_signature(func):
if hasattr(func, "function_spec"): # for RestoredFunction
# assume all concrete functions have same signature
return get_output_signature(func.concrete_functions[0])
if hasattr(func, "structured_input_signature"): # for ConcreteFunction
if func.structured_outputs is not None:
if isinstance(func.structured_outputs, dict):
return {
k: normalize_spec(v) for k, v in func.structured_outputs.items()
}
return func.structured_outputs
else:
return tuple(normalize_spec(v) for v in func.outputs)
return tuple()
def get_restored_functions(m):
function_map = {k: getattr(m, k, None) for k in dir(m)}
return {
k: v
for k, v in function_map.items()
if k not in TF_KERAS_DEFAULT_FUNCTIONS and hasattr(v, "function_spec")
}
def get_serving_default_function(m):
try:
import tensorflow as tf
except ImportError:
raise MissingDependencyException(
"Tensorflow package is required to use TfSavedModelArtifact"
)
return m.signatures.get(tf.compat.v2.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
def cast_tensor_by_spec(_input, spec):
"""
transform dtype & shape following spec
"""
try:
import tensorflow as tf
except ImportError:
raise MissingDependencyException(
"Tensorflow package is required to use TfSavedModelArtifact"
)
if not _isinstance_wrapper(spec, "TensorSpec"):
return _input
if _isinstance_wrapper(_input, ["Tensor", "EagerTensor"]):
# TensorFlow issue #43038
# pylint: disable=unexpected-keyword-arg, no-value-for-parameter
return tf.cast(_input, dtype=spec.dtype, name=spec.name)
else:
return tf.constant(_input, dtype=spec.dtype, name=spec.name)
def _pretty_format_function_call(base, name, arg_names):
if arg_names:
part_sigs = ", ".join(f"{k}" for k in arg_names)
else:
part_sigs = ""
if name == "__call__":
return f"{base}({part_sigs})"
return f"{base}.{name}({part_sigs})"
def _pretty_format_positional(positional):
return f'Positional arguments ({len(positional)} total):\n * \n{" * ".join(str(a) for a in positional)}' # noqa
def pretty_format_function(function, obj="<object>", name="<function>"):
ret = ""
outs = get_output_signature(function)
sigs = get_input_signatures(function)
arg_names = get_arg_names(function)
if hasattr(function, "function_spec"):
arg_names = function.function_spec.arg_names
else:
arg_names = function._arg_keywords
ret += _pretty_format_function_call(obj, name, arg_names)
ret += "\n------------\n"
signature_descriptions = []
for index, sig in enumerate(sigs):
positional, keyword = sig
signature_descriptions.append(
"Arguments Option {}:\n {}\n Keyword arguments:\n {}".format(
index + 1, _pretty_format_positional(positional), keyword
)
)
ret += "\n\n".join(signature_descriptions)
ret += f"\n\nReturn:\n {outs}\n\n"
return ret
def pretty_format_restored_model(model):
part_functions = ""
restored_functions = get_restored_functions(model)
for name, func in restored_functions.items():
part_functions += pretty_format_function(func, "model", name)
part_functions += "\n"
serving_default = get_serving_default_function(model)
if serving_default:
part_functions += pretty_format_function(
serving_default, "model", "signature['serving_default']"
)
part_functions += "\n"
if not restored_functions and not serving_default:
return (
"No serving function was found in the saved model. "
"In the model implementation, use `tf.function` decorator to mark "
"the method needed for model serving. \n"
"Find more details in related TensorFlow docs here "
"https://www.tensorflow.org/api_docs/python/tf/saved_model/save"
)
return f"Found restored functions:\n{part_functions}"
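# Hedged usage sketch: inspecting the serving surface of a SavedModel on disk.
# The path below is a placeholder, TensorFlow 2 must be installed, and nothing
# in this block runs on import.
if __name__ == "__main__":
    import tensorflow as tf

    saved_model = tf.saved_model.load("/tmp/my_saved_model")  # any SavedModel directory
    # Lists every restored tf.function (and the serving_default signature, if
    # present) together with its input/output specs.
    print(pretty_format_restored_model(saved_model))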
avg_line_length: 30.556962 | max_line_length: 122 | alphanum_fraction: 0.640431

hexsha: 4a192e35c61f6874e92fe39403f9f299220b6c2b | size: 5245 | ext: py | lang: Python
max_stars_repo_path: PaddleCV/PaddleDetection/tools/eval.py | max_stars_repo_name: Jie5216/models-1 | max_stars_repo_head_hexsha: 4e3828a606d0cc3f1e93e9716a71a10765a88dfe | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2019-08-09T06:54:02.000Z | max_stars_repo_stars_event_max_datetime: 2019-08-09T06:54:02.000Z
max_issues_repo_path: PaddleCV/PaddleDetection/tools/eval.py | max_issues_repo_name: Jie5216/models-1 | max_issues_repo_head_hexsha: 4e3828a606d0cc3f1e93e9716a71a10765a88dfe | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: PaddleCV/PaddleDetection/tools/eval.py | max_forks_repo_name: Jie5216/models-1 | max_forks_repo_head_hexsha: 4e3828a606d0cc3f1e93e9716a71a10765a88dfe | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import multiprocessing
def set_paddle_flags(**kwargs):
for key, value in kwargs.items():
if os.environ.get(key, None) is None:
os.environ[key] = str(value)
# NOTE(paddle-dev): All of these flags should be set before
# `import paddle`. Otherwise, it would not take any effect.
set_paddle_flags(
FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory
)
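# The call above is equivalent to launching the script with
# `FLAGS_eager_delete_tensor_gb=0 python eval.py ...`, except that a value
# already present in the environment always takes precedence over the default
# supplied here.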
import paddle.fluid as fluid
from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results, json_eval_results
import ppdet.utils.checkpoint as checkpoint
from ppdet.utils.cli import ArgsParser
from ppdet.utils.check import check_gpu
from ppdet.modeling.model_input import create_feed
from ppdet.data.data_feed import create_reader
from ppdet.core.workspace import load_config, merge_config, create
import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
def main():
"""
Main evaluate function
"""
cfg = load_config(FLAGS.config)
if 'architecture' in cfg:
main_arch = cfg.architecture
else:
raise ValueError("'architecture' not specified in config file.")
merge_config(FLAGS.opt)
# check if set use_gpu=True in paddlepaddle cpu version
check_gpu(cfg.use_gpu)
if cfg.use_gpu:
devices_num = fluid.core.get_cuda_device_count()
else:
devices_num = int(
os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
if 'eval_feed' not in cfg:
eval_feed = create(main_arch + 'EvalFeed')
else:
eval_feed = create(cfg.eval_feed)
# define executor
place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
# build program
model = create(main_arch)
startup_prog = fluid.Program()
eval_prog = fluid.Program()
with fluid.program_guard(eval_prog, startup_prog):
with fluid.unique_name.guard():
pyreader, feed_vars = create_feed(eval_feed)
fetches = model.eval(feed_vars)
eval_prog = eval_prog.clone(True)
reader = create_reader(eval_feed, args_path=FLAGS.dataset_dir)
pyreader.decorate_sample_list_generator(reader, place)
    # evaluate directly from an existing json results file
if FLAGS.json_eval:
json_eval_results(
eval_feed, cfg.metric, json_directory=FLAGS.output_eval)
return
# compile program for multi-devices
if devices_num <= 1:
compile_program = fluid.compiler.CompiledProgram(eval_prog)
else:
build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = False
build_strategy.enable_inplace = False
compile_program = fluid.compiler.CompiledProgram(
eval_prog).with_data_parallel(build_strategy=build_strategy)
# load model
exe.run(startup_prog)
if 'weights' in cfg:
checkpoint.load_pretrain(exe, eval_prog, cfg.weights)
assert cfg.metric in ['COCO', 'VOC'], \
"unknown metric type {}".format(cfg.metric)
extra_keys = []
if cfg.metric == 'COCO':
extra_keys = ['im_info', 'im_id', 'im_shape']
if cfg.metric == 'VOC':
extra_keys = ['gt_box', 'gt_label', 'is_difficult']
keys, values, cls = parse_fetches(fetches, eval_prog, extra_keys)
# whether output bbox is normalized in model output layer
is_bbox_normalized = False
if hasattr(model, 'is_bbox_normalized') and \
callable(model.is_bbox_normalized):
is_bbox_normalized = model.is_bbox_normalized()
results = eval_run(exe, compile_program, pyreader, keys, values, cls)
# evaluation
resolution = None
if 'mask' in results[0]:
resolution = model.mask_head.resolution
eval_results(results, eval_feed, cfg.metric, cfg.num_classes, resolution,
is_bbox_normalized, FLAGS.output_eval)
if __name__ == '__main__':
parser = ArgsParser()
parser.add_argument(
"--json_eval",
action='store_true',
default=False,
        help="Whether to re-evaluate from existing bbox.json or mask.json results")
parser.add_argument(
"-d",
"--dataset_dir",
default=None,
type=str,
help="Dataset path, same as DataFeed.dataset.dataset_dir")
parser.add_argument(
"-f",
"--output_eval",
default=None,
type=str,
help="Evaluation file directory, default is current directory.")
FLAGS = parser.parse_args()
main()
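# Illustrative invocation (assuming ppdet's ArgsParser exposes the usual
# -c/--config flag; the config and data paths below are placeholders):
#
#   python tools/eval.py -c configs/mask_rcnn_r50_1x.yml \
#       -d dataset/coco -f output/eval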
avg_line_length: 32.78125 | max_line_length: 91 | alphanum_fraction: 0.693041

hexsha: 4a192efbc96f0f23b36c71d4bbd9671e27dae76d | size: 1672 | ext: py | lang: Python
max_stars_repo_path: package/spack-r-dicekriging/package.py | max_stars_repo_name: ctuning/ck-spack | max_stars_repo_head_hexsha: 307934efce1be2d4f104251275c82fbc70127105 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2018-07-17T07:45:09.000Z | max_stars_repo_stars_event_max_datetime: 2018-07-17T07:45:09.000Z
max_issues_repo_path: package/spack-r-dicekriging/package.py | max_issues_repo_name: ctuning/ck-spack | max_issues_repo_head_hexsha: 307934efce1be2d4f104251275c82fbc70127105 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: package/spack-r-dicekriging/package.py | max_forks_repo_name: ctuning/ck-spack | max_forks_repo_head_hexsha: 307934efce1be2d4f104251275c82fbc70127105 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RDicekriging(RPackage):
"""Estimation, validation and prediction of kriging models. Important
functions : km, print.km, plot.km, predict.km."""
homepage = "http://dice.emse.fr/"
url = "https://cran.r-project.org/src/contrib/DiceKriging_1.5.5.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/DiceKriging"
version('1.5.5', 'ee3e2d7a91d4a712467ef4f0b69c2844')
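    # Hedged usage note: with this recipe available to a Spack instance, the
    # library would typically be built with `spack install r-dicekriging` and
    # then used from R via `library(DiceKriging)`.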
| 45.189189
| 80
| 0.677632
|
4a192f5e3dc916e0ae06cfb5a7ad378e1d679ef9
| 1,630
|
py
|
Python
|
src/decisionengine/framework/util/tests/fixtures.py
|
moibenko/decisionengine
|
4c458e0c225ec2ce1e82d56e752724983331b7d1
|
[
"Apache-2.0"
] | null | null | null |
src/decisionengine/framework/util/tests/fixtures.py
|
moibenko/decisionengine
|
4c458e0c225ec2ce1e82d56e752724983331b7d1
|
[
"Apache-2.0"
] | null | null | null |
src/decisionengine/framework/util/tests/fixtures.py
|
moibenko/decisionengine
|
4c458e0c225ec2ce1e82d56e752724983331b7d1
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-FileCopyrightText: 2017 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
import pytest
__all__ = ["prometheus_env_setup", "Gauge", "Counter", "OtherMetric"]
@pytest.fixture(autouse=True)
def prometheus_env_setup(tmp_path, monkeypatch):
"""Make sure we have a directory set for PROMETHEUS_MULTIPROC_DIR so that
metric instantiation gives us multiprocess metrics"""
# Get a fixed dir
d = tmp_path
monkeypatch.setenv("PROMETHEUS_MULTIPROC_DIR", str(d))
yield
monkeypatch.delenv("PROMETHEUS_MULTIPROC_DIR")
# Put these metrics declarations in fixtures because we need the env_setup
# fixture to be set before importing any of the metrics
@pytest.fixture()
def Gauge():
from decisionengine.framework.util.metrics import Gauge
pytest.gauge_default_multiproc_mode = Gauge._DEFAULT_MULTIPROC_MODE
def _gauge(*args, **kwargs):
return Gauge(*args, **kwargs)
yield _gauge
@pytest.fixture()
def Counter():
from decisionengine.framework.util.metrics import Counter
def _counter(*args, **kwargs):
return Counter(*args, **kwargs)
yield _counter
@pytest.fixture()
def OtherMetric():
from decisionengine.framework.util.metrics import Histogram, Summary
def _decider(metric_type):
if metric_type == "histogram":
def _histogram(*args, **kwargs):
return Histogram(*args, **kwargs)
return _histogram
elif metric_type == "summary":
def _summary(*args, **kwargs):
return Summary(*args, **kwargs)
return _summary
yield _decider
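# Hedged usage sketch of how a test module might consume these fixtures; the
# metric names are made up for illustration and the snippet is kept as a
# comment so that pytest does not collect it from this fixtures module.
#
# def test_gauge_records_value(Gauge):
#     gauge = Gauge("de_example_gauge", "example gauge used in the docs")
#     gauge.set(42)
#
# def test_histogram_observes_latency(OtherMetric):
#     histogram = OtherMetric("histogram")("de_example_latency", "example latency")
#     histogram.observe(0.25)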
avg_line_length: 25.873016 | max_line_length: 77 | alphanum_fraction: 0.693865

hexsha: 4a192f6b754bad4ffc3c8493bd5e5cff7624a2f6 | size: 3293 | ext: py | lang: Python
max_stars_repo_path: test/test_configuration.py | max_stars_repo_name: mmadsen/axelrod-ct | max_stars_repo_head_hexsha: 90ea4319dd571546888c4d2a50255514e7d7fb94 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 5 | max_stars_repo_stars_event_min_datetime: 2015-05-03T08:49:11.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-23T11:44:00.000Z
max_issues_repo_path: test/test_configuration.py | max_issues_repo_name: mmadsen/axelrod-ct | max_issues_repo_head_hexsha: 90ea4319dd571546888c4d2a50255514e7d7fb94 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: test/test_configuration.py | max_forks_repo_name: mmadsen/axelrod-ct | max_forks_repo_head_hexsha: 90ea4319dd571546888c4d2a50255514e7d7fb94 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
#!/usr/bin/env python
# Copyright (c) 2013. Mark E. Madsen <mark@madsenlab.org>
#
# This work is licensed under the terms of the Apache Software License, Version 2.0. See the file LICENSE for details.
"""
Description here
"""
import unittest
import madsenlab.axelrod.utils as utils
import os
import tempfile
class ConfigurationTest(unittest.TestCase):
filename = "test"
def setUp(self):
        self.tf = tempfile.NamedTemporaryFile(mode="w", dir="/tmp", delete=False)
self.tf.write("""
{
"REPLICATIONS_PER_PARAM_SET" : 5,
"POPULATION_SIZES_STUDIED" : [500,1000],
"NUMBER_OF_DIMENSIONS_OR_FEATURES" : [1,2,4,8,16],
"NUMBER_OF_TRAITS_PER_DIMENSION" : [2,3,4,6,8,12,16,32]
}
""")
self.tf.flush()
def tearDown(self):
os.remove(self.tf.name)
    def test_configuration(self):
        print("tempfile: %s" % self.tf.name)
        config = utils.AxelrodConfiguration(self.tf.name)
        print("configured REPLICATIONS_PER_PARAM_SET: %s" % config.REPLICATIONS_PER_PARAM_SET)
        self.assertEqual(5, config.REPLICATIONS_PER_PARAM_SET, "Config file value does not match")
    def test_latex_output(self):
        config = utils.AxelrodConfiguration(self.tf.name)
        table = config.to_latex_table("test")
        print(table)
    def test_pandoc_output(self):
        config = utils.AxelrodConfiguration(self.tf.name)
        table = config.to_pandoc_table("test")
        print(table)
class TreeConfigurationTest(unittest.TestCase):
def setUp(self):
        self.tf = tempfile.NamedTemporaryFile(mode="w", dir="/tmp", delete=False)
self.tf.write("""{
"REPLICATIONS_PER_PARAM_SET" : 10,
"POPULATION_SIZES_STUDIED" : [100],
"TRAIT_LEARNING_RATE" : [0.1, 0.2, 0.3, 0.5],
"TRAIT_LOSS_RATE" : [0.00001, 0.00005, 0.0001, 0.00002],
"INNOVATION_RATE" : [0.00001, 0.00005, 0.0001, 0.00002],
"MAXIMUM_INITIAL_TRAITS" : [4,8,16,32],
"NUM_TRAIT_TREES" : [4,8,12,16],
"TREE_BRANCHING_FACTOR" : [2,3,4,6],
"TREE_DEPTH_FACTOR" : [3,4,6,8],
"SIMULATION_CUTOFF_TIME" : 3000000,
"POPULATION_STRUCTURE_CLASS" : "madsenlab.axelrod.population.TreeTraitStructurePopulation",
"INTERACTION_RULE_CLASS" : "madsenlab.axelrod.rules.MultipleTreePrerequisitesLearningCopyingRule",
"NETWORK_FACTORY_CLASS" : "madsenlab.axelrod.population.SquareLatticeFactory",
"TRAIT_FACTORY_CLASS" : "madsenlab.axelrod.traits.MultipleBalancedTreeStructuredTraitFactory"
}
""")
self.tf.flush()
def tearDown(self):
os.remove(self.tf.name)
    def test_configuration(self):
        print("tempfile: %s" % self.tf.name)
        config = utils.TreeStructuredConfiguration(self.tf.name)
        print("configured REPLICATIONS_PER_PARAM_SET: %s" % config.REPLICATIONS_PER_PARAM_SET)
        self.assertEqual(10, config.REPLICATIONS_PER_PARAM_SET, "Config file value does not match")
    def test_latex_output(self):
        config = utils.TreeStructuredConfiguration(self.tf.name)
        table = config.to_latex_table("test")
        print(table)
    def test_pandoc_output(self):
        config = utils.TreeStructuredConfiguration(self.tf.name)
        table = config.to_pandoc_table("test")
        print(table)
if __name__ == "__main__":
unittest.main()
avg_line_length: 29.401786 | max_line_length: 119 | alphanum_fraction: 0.681445

hexsha: 4a193006755101ee451d82a7a6b9cac61c37dfdb | size: 19283 | ext: py | lang: Python
max_stars_repo_path: airflow/providers/google/cloud/operators/functions.py | max_stars_repo_name: MohammedDiab/airflow | max_stars_repo_head_hexsha: 4fa7df5de9d9bfe0509722599baf1be91ffea3b8 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: airflow/providers/google/cloud/operators/functions.py | max_issues_repo_name: MohammedDiab/airflow | max_issues_repo_head_hexsha: 4fa7df5de9d9bfe0509722599baf1be91ffea3b8 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: airflow/providers/google/cloud/operators/functions.py | max_forks_repo_name: MohammedDiab/airflow | max_forks_repo_head_hexsha: 4fa7df5de9d9bfe0509722599baf1be91ffea3b8 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google Cloud Functions operators.
"""
import re
from typing import Any, Dict, List, Optional, Sequence, Union
from googleapiclient.errors import HttpError
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.functions import CloudFunctionsHook
from airflow.providers.google.cloud.utils.field_validator import (
GcpBodyFieldValidator, GcpFieldValidationException,
)
from airflow.utils.decorators import apply_defaults
from airflow.version import version
def _validate_available_memory_in_mb(value):
if int(value) <= 0:
raise GcpFieldValidationException("The available memory has to be greater than 0")
def _validate_max_instances(value):
if int(value) <= 0:
raise GcpFieldValidationException(
"The max instances parameter has to be greater than 0")
CLOUD_FUNCTION_VALIDATION = [
dict(name="name", regexp="^.+$"),
dict(name="description", regexp="^.+$", optional=True),
dict(name="entryPoint", regexp=r'^.+$', optional=True),
dict(name="runtime", regexp=r'^.+$', optional=True),
dict(name="timeout", regexp=r'^.+$', optional=True),
dict(name="availableMemoryMb", custom_validation=_validate_available_memory_in_mb,
optional=True),
dict(name="labels", optional=True),
dict(name="environmentVariables", optional=True),
dict(name="network", regexp=r'^.+$', optional=True),
dict(name="maxInstances", optional=True, custom_validation=_validate_max_instances),
dict(name="source_code", type="union", fields=[
dict(name="sourceArchiveUrl", regexp=r'^.+$'),
dict(name="sourceRepositoryUrl", regexp=r'^.+$', api_version='v1beta2'),
dict(name="sourceRepository", type="dict", fields=[
dict(name="url", regexp=r'^.+$')
]),
dict(name="sourceUploadUrl")
]),
dict(name="trigger", type="union", fields=[
dict(name="httpsTrigger", type="dict", fields=[
# This dict should be empty at input (url is added at output)
]),
dict(name="eventTrigger", type="dict", fields=[
dict(name="eventType", regexp=r'^.+$'),
dict(name="resource", regexp=r'^.+$'),
dict(name="service", regexp=r'^.+$', optional=True),
dict(name="failurePolicy", type="dict", optional=True, fields=[
dict(name="retry", type="dict", optional=True)
])
])
]),
] # type: List[Dict[str, Any]]
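# A minimal body that passes the validation spec above (every name and URL is a
# placeholder): only one source_code variant and only one trigger variant may
# be supplied at a time.
#
# {
#     "name": "projects/my-project/locations/europe-west1/functions/hello",
#     "entryPoint": "hello",
#     "runtime": "python37",
#     "sourceUploadUrl": "https://storage.googleapis.com/example-signed-upload-url",
#     "httpsTrigger": {},
# }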
class CloudFunctionDeployFunctionOperator(BaseOperator):
"""
Creates a function in Google Cloud Functions.
If a function with this name already exists, it will be updated.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudFunctionDeployFunctionOperator`
:param location: Google Cloud Platform region where the function should be created.
:type location: str
:param body: Body of the Cloud Functions definition. The body must be a
Cloud Functions dictionary as described in:
https://cloud.google.com/functions/docs/reference/rest/v1/projects.locations.functions
. Different API versions require different variants of the Cloud Functions
dictionary.
:type body: dict or google.cloud.functions.v1.CloudFunction
:param project_id: (Optional) Google Cloud Platform project ID where the function
should be created.
:type project_id: str
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud
Platform - default 'google_cloud_default'.
:type gcp_conn_id: str
:param api_version: (Optional) API version used (for example v1 - default - or
v1beta1).
:type api_version: str
:param zip_path: Path to zip file containing source code of the function. If the path
is set, the sourceUploadUrl should not be specified in the body or it should
be empty. Then the zip file will be uploaded using the upload URL generated
via generateUploadUrl from the Cloud Functions API.
:type zip_path: str
:param validate_body: If set to False, body validation is not performed.
:type validate_body: bool
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
# [START gcf_function_deploy_template_fields]
template_fields = ('body', 'project_id', 'location', 'gcp_conn_id', 'api_version',
'impersonation_chain',)
# [END gcf_function_deploy_template_fields]
@apply_defaults
def __init__(self, *,
location: str,
body: Dict,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
api_version: str = 'v1',
zip_path: Optional[str] = None,
validate_body: bool = True,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs) -> None:
self.project_id = project_id
self.location = location
self.body = body
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.zip_path = zip_path
self.zip_path_preprocessor = ZipPathPreprocessor(body, zip_path)
self._field_validator = None # type: Optional[GcpBodyFieldValidator]
self.impersonation_chain = impersonation_chain
if validate_body:
self._field_validator = GcpBodyFieldValidator(CLOUD_FUNCTION_VALIDATION,
api_version=api_version)
self._validate_inputs()
super().__init__(**kwargs)
def _validate_inputs(self):
if not self.location:
raise AirflowException("The required parameter 'location' is missing")
if not self.body:
raise AirflowException("The required parameter 'body' is missing")
self.zip_path_preprocessor.preprocess_body()
def _validate_all_body_fields(self):
if self._field_validator:
self._field_validator.validate(self.body)
def _create_new_function(self, hook):
hook.create_new_function(
project_id=self.project_id,
location=self.location,
body=self.body)
def _update_function(self, hook):
hook.update_function(self.body['name'], self.body, self.body.keys())
def _check_if_function_exists(self, hook):
name = self.body.get('name')
if not name:
raise GcpFieldValidationException("The 'name' field should be present in "
"body: '{}'.".format(self.body))
try:
hook.get_function(name)
except HttpError as e:
status = e.resp.status
if status == 404:
return False
raise e
return True
def _upload_source_code(self, hook):
return hook.upload_function_zip(project_id=self.project_id,
location=self.location,
zip_path=self.zip_path)
def _set_airflow_version_label(self):
if 'labels' not in self.body.keys():
self.body['labels'] = {}
self.body['labels'].update(
{'airflow-version': 'v' + version.replace('.', '-').replace('+', '-')})
def execute(self, context):
hook = CloudFunctionsHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
if self.zip_path_preprocessor.should_upload_function():
self.body[GCF_SOURCE_UPLOAD_URL] = self._upload_source_code(hook)
self._validate_all_body_fields()
self._set_airflow_version_label()
if not self._check_if_function_exists(hook):
self._create_new_function(hook)
else:
self._update_function(hook)
GCF_SOURCE_ARCHIVE_URL = 'sourceArchiveUrl'
GCF_SOURCE_UPLOAD_URL = 'sourceUploadUrl'
SOURCE_REPOSITORY = 'sourceRepository'
GCF_ZIP_PATH = 'zip_path'
class ZipPathPreprocessor:
"""
Pre-processes zip path parameter.
Responsible for checking if the zip path parameter is correctly specified in
relation with source_code body fields. Non empty zip path parameter is special because
it is mutually exclusive with sourceArchiveUrl and sourceRepository body fields.
It is also mutually exclusive with non-empty sourceUploadUrl.
    The pre-processing modifies the sourceUploadUrl body field in a special way when
    zip_path is not empty. An extra step is run when the execute method is called: the
    sourceUploadUrl field value is set in the body to the value returned by the
    generateUploadUrl Cloud Functions API method.
:param body: Body passed to the create/update method calls.
:type body: dict
:param zip_path: (optional) Path to zip file containing source code of the function. If the path
is set, the sourceUploadUrl should not be specified in the body or it should
be empty. Then the zip file will be uploaded using the upload URL generated
via generateUploadUrl from the Cloud Functions API.
:type zip_path: str
"""
upload_function = None # type: Optional[bool]
def __init__(self, body: dict, zip_path: Optional[str] = None) -> None:
self.body = body
self.zip_path = zip_path
@staticmethod
def _is_present_and_empty(dictionary, field):
return field in dictionary and not dictionary[field]
def _verify_upload_url_and_no_zip_path(self):
if self._is_present_and_empty(self.body, GCF_SOURCE_UPLOAD_URL):
if not self.zip_path:
raise AirflowException(
"Parameter '{url}' is empty in the body and argument '{path}' "
"is missing or empty. You need to have non empty '{path}' "
"when '{url}' is present and empty.".format(
url=GCF_SOURCE_UPLOAD_URL,
path=GCF_ZIP_PATH)
)
def _verify_upload_url_and_zip_path(self):
if GCF_SOURCE_UPLOAD_URL in self.body and self.zip_path:
if not self.body[GCF_SOURCE_UPLOAD_URL]:
self.upload_function = True
else:
raise AirflowException("Only one of '{}' in body or '{}' argument "
"allowed. Found both."
.format(GCF_SOURCE_UPLOAD_URL, GCF_ZIP_PATH))
def _verify_archive_url_and_zip_path(self):
if GCF_SOURCE_ARCHIVE_URL in self.body and self.zip_path:
raise AirflowException("Only one of '{}' in body or '{}' argument "
"allowed. Found both."
.format(GCF_SOURCE_ARCHIVE_URL, GCF_ZIP_PATH))
def should_upload_function(self) -> bool:
"""
Checks if function source should be uploaded.
:rtype: bool
"""
if self.upload_function is None:
raise AirflowException('validate() method has to be invoked before '
'should_upload_function')
return self.upload_function
def preprocess_body(self):
"""
Modifies sourceUploadUrl body field in special way when zip_path
is not empty.
"""
self._verify_archive_url_and_zip_path()
self._verify_upload_url_and_zip_path()
self._verify_upload_url_and_no_zip_path()
if self.upload_function is None:
self.upload_function = False
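# Minimal illustration of the pre-processing contract (the zip path is a
# placeholder): an empty sourceUploadUrl combined with a non-empty zip_path
# marks the function source for upload.
#
# >>> preprocessor = ZipPathPreprocessor({"sourceUploadUrl": ""}, zip_path="/tmp/fn.zip")
# >>> preprocessor.preprocess_body()
# >>> preprocessor.should_upload_function()
# True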
FUNCTION_NAME_PATTERN = '^projects/[^/]+/locations/[^/]+/functions/[^/]+$'
FUNCTION_NAME_COMPILED_PATTERN = re.compile(FUNCTION_NAME_PATTERN)
class CloudFunctionDeleteFunctionOperator(BaseOperator):
"""
Deletes the specified function from Google Cloud Functions.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudFunctionDeleteFunctionOperator`
:param name: A fully-qualified function name, matching
the pattern: `^projects/[^/]+/locations/[^/]+/functions/[^/]+$`
:type name: str
:param gcp_conn_id: The connection ID to use to connect to Google Cloud Platform.
:type gcp_conn_id: str
:param api_version: API version used (for example v1 or v1beta1).
:type api_version: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
# [START gcf_function_delete_template_fields]
template_fields = ('name', 'gcp_conn_id', 'api_version', 'impersonation_chain',)
# [END gcf_function_delete_template_fields]
@apply_defaults
def __init__(self, *,
name: str,
gcp_conn_id: str = 'google_cloud_default',
api_version: str = 'v1',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs) -> None:
self.name = name
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.impersonation_chain = impersonation_chain
self._validate_inputs()
super().__init__(**kwargs)
def _validate_inputs(self):
if not self.name:
raise AttributeError('Empty parameter: name')
else:
pattern = FUNCTION_NAME_COMPILED_PATTERN
if not pattern.match(self.name):
raise AttributeError(
'Parameter name must match pattern: {}'.format(FUNCTION_NAME_PATTERN))
def execute(self, context):
hook = CloudFunctionsHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
try:
return hook.delete_function(self.name)
except HttpError as e:
status = e.resp.status
if status == 404:
self.log.info('The function does not exist in this project')
else:
self.log.error('An error occurred. Exiting.')
raise e
class CloudFunctionInvokeFunctionOperator(BaseOperator):
"""
Invokes a deployed Cloud Function. To be used for testing
purposes as very limited traffic is allowed.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudFunctionDeployFunctionOperator`
:param function_id: ID of the function to be called
:type function_id: str
:param input_data: Input to be passed to the function
:type input_data: Dict
:param location: The location where the function is located.
:type location: str
:param project_id: Optional, Google Cloud Project project_id where the function belongs.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
:return: None
"""
template_fields = ('function_id', 'input_data', 'location', 'project_id',
'impersonation_chain',)
@apply_defaults
def __init__(
self, *,
function_id: str,
input_data: Dict,
location: str,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
api_version: str = 'v1',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs
) -> None:
super().__init__(**kwargs)
self.function_id = function_id
self.input_data = input_data
self.location = location
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.impersonation_chain = impersonation_chain
def execute(self, context: Dict):
hook = CloudFunctionsHook(
api_version=self.api_version,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info('Calling function %s.', self.function_id)
result = hook.call_function(
function_id=self.function_id,
input_data=self.input_data,
location=self.location,
project_id=self.project_id
)
self.log.info('Function called successfully. Execution id %s', result.get('executionId', None))
self.xcom_push(context=context, key='execution_id', value=result.get('executionId', None))
return result
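# Hedged usage sketch of the three operators chained in a DAG; the project id,
# region, runtime and function name are illustrative placeholders, and the
# snippet is kept as a comment since this module only defines operators.
#
# from datetime import datetime
# from airflow import DAG
#
# with DAG("gcf_example", start_date=datetime(2020, 1, 1), schedule_interval=None) as dag:
#     deploy = CloudFunctionDeployFunctionOperator(
#         task_id="deploy_function",
#         location="europe-west1",
#         body={
#             "name": "projects/my-project/locations/europe-west1/functions/hello",
#             "entryPoint": "hello",
#             "runtime": "python37",
#             "httpsTrigger": {},
#             "sourceUploadUrl": "",
#         },
#         zip_path="/tmp/hello.zip",
#     )
#     invoke = CloudFunctionInvokeFunctionOperator(
#         task_id="invoke_function",
#         function_id="hello",
#         input_data={"data": "ping"},
#         location="europe-west1",
#         project_id="my-project",
#     )
#     delete = CloudFunctionDeleteFunctionOperator(
#         task_id="delete_function",
#         name="projects/my-project/locations/europe-west1/functions/hello",
#     )
#     deploy >> invoke >> delete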
avg_line_length: 42.756098 | max_line_length: 103 | alphanum_fraction: 0.658507

hexsha: 4a19301deaf612d4c4129390e18b1339437feb72 | size: 4425 | ext: py | lang: Python
max_stars_repo_path: v0.7/medical_imaging/3d-unet/run.py | max_stars_repo_name: kahmed10/inference | max_stars_repo_head_hexsha: 4ab7690e3ebbc1a1ba56bf492437f3de92119758 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: v0.7/medical_imaging/3d-unet/run.py | max_issues_repo_name: kahmed10/inference | max_issues_repo_head_hexsha: 4ab7690e3ebbc1a1ba56bf492437f3de92119758 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: v0.7/medical_imaging/3d-unet/run.py | max_forks_repo_name: kahmed10/inference | max_forks_repo_head_hexsha: 4ab7690e3ebbc1a1ba56bf492437f3de92119758 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# coding=utf-8
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.getcwd())
import argparse
import mlperf_loadgen as lg
import subprocess
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--backend",
choices=["pytorch", "onnxruntime", "tf"],
default="pytorch",
help="Backend")
parser.add_argument(
"--scenario",
choices=["SingleStream", "Offline", "Server", "MultiStream"],
default="Offline",
help="Scenario")
parser.add_argument("--accuracy",
action="store_true",
help="enable accuracy pass")
parser.add_argument("--mlperf_conf",
default="build/mlperf.conf",
help="mlperf rules config")
parser.add_argument("--user_conf",
default="user.conf",
                        help="user rules config, applied after mlperf_conf")
parser.add_argument(
"--model_dir",
default=
"build/result/nnUNet/3d_fullres/Task043_BraTS2019/nnUNetTrainerV2__nnUNetPlansv2.mlperf.1",
help="Path to the directory containing plans.pkl")
parser.add_argument("--model", help="Path to the ONNX or TF model")
parser.add_argument("--preprocessed_data_dir",
default="build/preprocessed_data",
help="path to preprocessed data")
parser.add_argument("--performance_count",
type=int,
default=16,
help="performance count")
args = parser.parse_args()
return args
scenario_map = {
"SingleStream": lg.TestScenario.SingleStream,
"Offline": lg.TestScenario.Offline,
"Server": lg.TestScenario.Server,
"MultiStream": lg.TestScenario.MultiStream
}
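# Illustrative invocation (model and data paths are placeholders; the defaults
# from get_args() apply when flags are omitted):
#
#   python3 run.py --backend onnxruntime --model build/model/3dunet.onnx \
#       --preprocessed_data_dir build/preprocessed_data --scenario Offline --accuracy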
def main():
args = get_args()
if args.backend == "pytorch":
from pytorch_SUT import get_pytorch_sut
sut = get_pytorch_sut(args.model_dir, args.preprocessed_data_dir,
args.performance_count)
elif args.backend == "onnxruntime":
from onnxruntime_SUT import get_onnxruntime_sut
sut = get_onnxruntime_sut(args.model, args.preprocessed_data_dir,
args.performance_count)
elif args.backend == "tf":
from tf_SUT import get_tf_sut
sut = get_tf_sut(args.model, args.preprocessed_data_dir,
args.performance_count)
else:
raise ValueError("Unknown backend: {:}".format(args.backend))
settings = lg.TestSettings()
settings.scenario = scenario_map[args.scenario]
settings.FromConfig(args.mlperf_conf, "3d-unet", args.scenario)
settings.FromConfig(args.user_conf, "3d-unet", args.scenario)
if args.accuracy:
settings.mode = lg.TestMode.AccuracyOnly
else:
settings.mode = lg.TestMode.PerformanceOnly
log_path = "build/logs"
if not os.path.exists(log_path):
os.makedirs(log_path)
log_output_settings = lg.LogOutputSettings()
log_output_settings.outdir = log_path
log_output_settings.copy_summary_to_stdout = True
log_settings = lg.LogSettings()
log_settings.log_output = log_output_settings
print("Running Loadgen test...")
lg.StartTestWithLogSettings(sut.sut, sut.qsl.qsl, settings, log_settings)
if args.accuracy:
print("Running accuracy script...")
cmd = "python3 brats_eval.py"
subprocess.check_call(cmd, shell=True)
print("Done!")
print("Destroying SUT...")
lg.DestroySUT(sut.sut)
print("Destroying QSL...")
lg.DestroyQSL(sut.qsl.qsl)
if __name__ == "__main__":
main()
avg_line_length: 34.84252 | max_line_length: 111 | alphanum_fraction: 0.642712

hexsha: 4a19303ee7b011f638b707a750972783e6d375f9 | size: 21528 | ext: py | lang: Python
max_stars_repo_path: venv/Lib/site-packages/sklearn/tests/test_calibration.py | max_stars_repo_name: baleshwari/Predict-Zomato-Restaurant-Ratings | max_stars_repo_head_hexsha: 32505433ebcd8358c52b22775cc146f875c4e190 | max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 13 | max_stars_repo_stars_event_min_datetime: 2020-05-03T18:42:05.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-23T07:44:19.000Z
max_issues_repo_path: venv/Lib/site-packages/sklearn/tests/test_calibration.py | max_issues_repo_name: baleshwari/Predict-Zomato-Restaurant-Ratings | max_issues_repo_head_hexsha: 32505433ebcd8358c52b22775cc146f875c4e190 | max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: 25 | max_issues_repo_issues_event_min_datetime: 2020-11-16T15:36:41.000Z | max_issues_repo_issues_event_max_datetime: 2021-06-01T05:15:31.000Z
max_forks_repo_path: venv/Lib/site-packages/sklearn/tests/test_calibration.py | max_forks_repo_name: baleshwari/Predict-Zomato-Restaurant-Ratings | max_forks_repo_head_hexsha: 32505433ebcd8358c52b22775cc146f875c4e190 | max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 8 | max_forks_repo_forks_event_min_datetime: 2020-10-05T20:56:08.000Z | max_forks_repo_forks_event_max_datetime: 2020-10-27T23:30:03.000Z
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy import sparse
from sklearn.base import BaseEstimator
from sklearn.model_selection import LeaveOneOut, train_test_split
from sklearn.utils._testing import (assert_array_almost_equal,
assert_almost_equal,
assert_array_equal,
assert_raises, ignore_warnings)
from sklearn.utils.extmath import softmax
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification, make_blobs
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold, cross_val_predict
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.isotonic import IsotonicRegression
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
@pytest.fixture(scope="module")
def data():
X, y = make_classification(
n_samples=200, n_features=6, random_state=42
)
return X, y
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration(data, method, ensemble):
# Test calibration objects with isotonic and sigmoid
n_samples = 100
X, y = data
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
cal_clf = CalibratedClassifierCV(clf, cv=y.size + 1, ensemble=ensemble)
assert_raises(ValueError, cal_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
# Note that this fit overwrites the fit on the entire training
# set
cal_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_cal_clf = cal_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss(y_test, prob_pos_cal_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
cal_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_cal_clf,
prob_pos_cal_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
cal_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_cal_clf, prob_pos_cal_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
cal_clf.fit(this_X_train, (y_train + 1) % 2, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_cal_clf,
1 - prob_pos_cal_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss((y_test + 1) % 2,
prob_pos_cal_clf_relabeled))
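# Hedged sketch (editor addition, not part of scikit-learn's test suite): the
# basic CalibratedClassifierCV workflow that test_calibration above exercises,
# shown in isolation. All names used here come from this file's own imports.
def _example_calibrated_classifier_usage():
    X, y = make_classification(n_samples=200, n_features=6, random_state=0)
    base = LinearSVC(random_state=0)  # exposes decision_function, so it can be calibrated
    calibrated = CalibratedClassifierCV(base, method="sigmoid", cv=3)
    calibrated.fit(X, y)  # fits one base classifier per fold plus a sigmoid calibrator
    return calibrated.predict_proba(X)[:, 1]  # calibrated positive-class probabilities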
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_bad_method(data, ensemble):
# Check only "isotonic" and "sigmoid" are accepted as methods
X, y = data
clf = LinearSVC()
clf_invalid_method = CalibratedClassifierCV(
clf, method="foo", ensemble=ensemble
)
with pytest.raises(ValueError):
clf_invalid_method.fit(X, y)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_regressor(data, ensemble):
    # `base_estimator` should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
X, y = data
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), ensemble=ensemble)
with pytest.raises(RuntimeError):
clf_base_regressor.fit(X, y)
def test_calibration_default_estimator(data):
# Check base_estimator default is LinearSVC
X, y = data
calib_clf = CalibratedClassifierCV(cv=2)
calib_clf.fit(X, y)
base_est = calib_clf.calibrated_classifiers_[0].base_estimator
assert isinstance(base_est, LinearSVC)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_cv_splitter(data, ensemble):
# Check when `cv` is a CV splitter
X, y = data
splits = 5
kfold = KFold(n_splits=splits)
calib_clf = CalibratedClassifierCV(cv=kfold, ensemble=ensemble)
assert isinstance(calib_clf.cv, KFold)
assert calib_clf.cv.n_splits == splits
calib_clf.fit(X, y)
expected_n_clf = splits if ensemble else 1
assert len(calib_clf.calibrated_classifiers_) == expected_n_clf
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_sample_weight(data, method, ensemble):
n_samples = 100
X, y = data
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(
base_estimator, method=method, ensemble=ensemble
)
calibrated_clf.fit(X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert diff > 0.1
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_parallel_execution(data, method, ensemble):
"""Test parallel calibration"""
X, y = data
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
base_estimator = LinearSVC(random_state=42)
cal_clf_parallel = CalibratedClassifierCV(
base_estimator, method=method, n_jobs=2, ensemble=ensemble
)
cal_clf_parallel.fit(X_train, y_train)
probs_parallel = cal_clf_parallel.predict_proba(X_test)
cal_clf_sequential = CalibratedClassifierCV(
base_estimator, method=method, n_jobs=1, ensemble=ensemble
)
cal_clf_sequential.fit(X_train, y_train)
probs_sequential = cal_clf_sequential.predict_proba(X_test)
assert_allclose(probs_parallel, probs_sequential)
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
# increase the number of RNG seeds to assess the statistical stability of this
# test:
@pytest.mark.parametrize('seed', range(2))
def test_calibration_multiclass(method, ensemble, seed):
def multiclass_brier(y_true, proba_pred, n_classes):
Y_onehot = np.eye(n_classes)[y_true]
return np.sum((Y_onehot - proba_pred) ** 2) / Y_onehot.shape[0]
# Test calibration for multiclass with classifier that implements
# only decision function.
clf = LinearSVC(random_state=7)
X, y = make_blobs(n_samples=500, n_features=100, random_state=seed,
centers=10, cluster_std=15.0)
# Use an unbalanced dataset by collapsing 8 clusters into one class
# to make the naive calibration based on a softmax more unlikely
# to work.
y[y > 2] = 2
n_classes = np.unique(y).shape[0]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
# Check probabilities sum to 1
assert_allclose(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that the dataset is not too trivial, otherwise it's hard
# to get interesting calibration data during the internal
# cross-validation loop.
assert 0.65 < clf.score(X_test, y_test) < 0.95
# Check that the accuracy of the calibrated model is never degraded
# too much compared to the original classifier.
assert cal_clf.score(X_test, y_test) > 0.95 * clf.score(X_test, y_test)
# Check that Brier loss of calibrated classifier is smaller than
# loss obtained by naively turning OvR decision function to
# probabilities via a softmax
uncalibrated_brier = \
multiclass_brier(y_test, softmax(clf.decision_function(X_test)),
n_classes=n_classes)
calibrated_brier = multiclass_brier(y_test, probas,
n_classes=n_classes)
assert calibrated_brier < 1.1 * uncalibrated_brier
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
clf = RandomForestClassifier(n_estimators=30, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
uncalibrated_brier = multiclass_brier(y_test, clf_probs,
n_classes=n_classes)
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
calibrated_brier = multiclass_brier(y_test, cal_clf_probs,
n_classes=n_classes)
assert calibrated_brier < 1.1 * uncalibrated_brier
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
# Check error if clf not prefit
unfit_clf = CalibratedClassifierCV(clf, cv="prefit")
with pytest.raises(NotFittedError):
unfit_clf.fit(X_calib, y_calib)
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
cal_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = cal_clf.predict_proba(this_X_test)
y_pred = cal_clf.predict(this_X_test)
prob_pos_cal_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss(y_test, prob_pos_cal_clf))
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
def test_calibration_ensemble_false(data, method):
# Test that `ensemble=False` is the same as using predictions from
# `cross_val_predict` to train calibrator.
X, y = data
clf = LinearSVC(random_state=7)
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3, ensemble=False)
cal_clf.fit(X, y)
cal_probas = cal_clf.predict_proba(X)
# Get probas manually
unbiased_preds = cross_val_predict(
clf, X, y, cv=3, method='decision_function'
)
if method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
else:
calibrator = _SigmoidCalibration()
calibrator.fit(unbiased_preds, y)
# Use `clf` fit on all data
clf.fit(X, y)
clf_df = clf.decision_function(X)
manual_probas = calibrator.predict(clf_df)
assert_allclose(cal_probas[:, 1], manual_probas)
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert len(prob_true) == len(prob_pred)
assert len(prob_true) == 2
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
# test that quantiles work as expected
y_true2 = np.array([0, 0, 0, 0, 1, 1])
y_pred2 = np.array([0., 0.1, 0.2, 0.5, 0.9, 1.])
prob_true_quantile, prob_pred_quantile = calibration_curve(
y_true2, y_pred2, n_bins=2, strategy='quantile')
assert len(prob_true_quantile) == len(prob_pred_quantile)
assert len(prob_true_quantile) == 2
assert_almost_equal(prob_true_quantile, [0, 2 / 3])
assert_almost_equal(prob_pred_quantile, [0.1, 0.8])
# Check that error is raised when invalid strategy is selected
assert_raises(ValueError, calibration_curve, y_true2, y_pred2,
strategy='percentile')
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_nan_imputer(ensemble):
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', SimpleImputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(
clf, cv=2, method='isotonic', ensemble=ensemble
)
clf_c.fit(X, y)
clf_c.predict(X)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_prob_sum(ensemble):
# Test that sum of probabilities is 1. A non-regression test for
# issue #7796
num_classes = 2
X, y = make_classification(n_samples=10, n_features=5,
n_classes=num_classes)
clf = LinearSVC(C=1.0, random_state=7)
clf_prob = CalibratedClassifierCV(
clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
)
clf_prob.fit(X, y)
probs = clf_prob.predict_proba(X)
assert_array_almost_equal(probs.sum(axis=1), np.ones(probs.shape[0]))
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_less_classes(ensemble):
# Test to check calibration works fine when train set in a test-train
# split does not contain all classes
# Since this test uses LOO, at each iteration train set will not contain a
# class label
X = np.random.randn(10, 5)
y = np.arange(10)
clf = LinearSVC(C=1.0, random_state=7)
cal_clf = CalibratedClassifierCV(
clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
)
cal_clf.fit(X, y)
for i, calibrated_classifier in \
enumerate(cal_clf.calibrated_classifiers_):
proba = calibrated_classifier.predict_proba(X)
if ensemble:
# Check that the unobserved class has proba=0
assert_array_equal(proba[:, i], np.zeros(len(y)))
# Check for all other classes proba>0
assert np.all(proba[:, :i] > 0)
assert np.all(proba[:, i + 1:] > 0)
else:
# Check `proba` are all 1/n_classes
assert np.allclose(proba, 1 / proba.shape[0])
@ignore_warnings(category=FutureWarning)
@pytest.mark.parametrize('X', [np.random.RandomState(42).randn(15, 5, 2),
np.random.RandomState(42).randn(15, 5, 2, 6)])
def test_calibration_accepts_ndarray(X):
"""Test that calibration accepts n-dimensional arrays as input"""
y = [1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0]
class MockTensorClassifier(BaseEstimator):
"""A toy estimator that accepts tensor inputs"""
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def decision_function(self, X):
# toy decision function that just needs to have the right shape:
return X.reshape(X.shape[0], -1).sum(axis=1)
calibrated_clf = CalibratedClassifierCV(MockTensorClassifier())
# we should be able to fit this classifier with no error
calibrated_clf.fit(X, y)
@pytest.fixture
def text_data():
text_data = [
{'state': 'NY', 'age': 'adult'},
{'state': 'TX', 'age': 'adult'},
{'state': 'VT', 'age': 'child'},
]
text_labels = [1, 0, 1]
return text_data, text_labels
@pytest.fixture
def text_data_pipeline(text_data):
X, y = text_data
pipeline_prefit = Pipeline([
('vectorizer', DictVectorizer()),
('clf', RandomForestClassifier())
])
return pipeline_prefit.fit(X, y)
def test_calibration_pipeline(text_data, text_data_pipeline):
# Test that calibration works in prefit pipeline with transformer,
# where `X` is not array-like, sparse matrix or dataframe at the start.
# See https://github.com/scikit-learn/scikit-learn/issues/8710
X, y = text_data
clf = text_data_pipeline
calib_clf = CalibratedClassifierCV(clf, cv='prefit')
calib_clf.fit(X, y)
# Check attributes are obtained from fitted estimator
assert_array_equal(calib_clf.classes_, clf.classes_)
msg = "'CalibratedClassifierCV' object has no attribute"
with pytest.raises(AttributeError, match=msg):
calib_clf.n_features_in_
@pytest.mark.parametrize('clf, cv', [
pytest.param(LinearSVC(C=1), 2),
pytest.param(LinearSVC(C=1), 'prefit'),
])
def test_calibration_attributes(clf, cv):
# Check that `n_features_in_` and `classes_` attributes created properly
X, y = make_classification(n_samples=10, n_features=5,
n_classes=2, random_state=7)
if cv == 'prefit':
clf = clf.fit(X, y)
calib_clf = CalibratedClassifierCV(clf, cv=cv)
calib_clf.fit(X, y)
if cv == 'prefit':
assert_array_equal(calib_clf.classes_, clf.classes_)
assert calib_clf.n_features_in_ == clf.n_features_in_
else:
classes = LabelEncoder().fit(y).classes_
assert_array_equal(calib_clf.classes_, classes)
assert calib_clf.n_features_in_ == X.shape[1]
# FIXME: remove in 1.1
def test_calibrated_classifier_cv_deprecation(data):
# Check that we raise the proper deprecation warning if accessing
# `calibrators_` from the `_CalibratedClassifier`.
X, y = data
calib_clf = CalibratedClassifierCV(cv=2).fit(X, y)
with pytest.warns(FutureWarning):
calibrators = calib_clf.calibrated_classifiers_[0].calibrators_
for clf1, clf2 in zip(
calibrators, calib_clf.calibrated_classifiers_[0].calibrators
):
assert clf1 is clf2
| 38.374332
| 79
| 0.674935
|
4a193127fb81782028e542a41370ab8a66738c54
| 2,938
|
py
|
Python
|
venv/lib/python3.8/site-packages/pyqtgraph/util/mutex.py
|
willBear/willBear-Fundamental_Analysis
|
bc67eb1e69dcf6765c0b77314d37f7f165a7318f
|
[
"MIT"
] | 23
|
2017-09-04T13:20:38.000Z
|
2022-03-08T08:15:17.000Z
|
venv/lib/python3.8/site-packages/pyqtgraph/util/mutex.py
|
willBear/willBear-Fundamental_Analysis
|
bc67eb1e69dcf6765c0b77314d37f7f165a7318f
|
[
"MIT"
] | 102
|
2021-01-20T11:14:21.000Z
|
2021-12-12T17:34:42.000Z
|
venv/lib/python3.8/site-packages/pyqtgraph/util/mutex.py
|
willBear/willBear-Fundamental_Analysis
|
bc67eb1e69dcf6765c0b77314d37f7f165a7318f
|
[
"MIT"
] | 5
|
2017-11-26T19:40:46.000Z
|
2021-03-11T17:25:23.000Z
|
# -*- coding: utf-8 -*-
from ..Qt import QtCore
import traceback
class Mutex(QtCore.QMutex):
"""
Subclass of QMutex that provides useful debugging information during
deadlocks--tracebacks are printed for both the code location that is
attempting to lock the mutex as well as the location that has already
acquired the lock.
Also provides __enter__ and __exit__ methods for use in "with" statements.
"""
def __init__(self, *args, **kargs):
if kargs.get('recursive', False):
args = (QtCore.QMutex.Recursive,)
QtCore.QMutex.__init__(self, *args)
self.l = QtCore.QMutex() ## for serializing access to self.tb
self.tb = []
self.debug = True ## True to enable debugging functions
def tryLock(self, timeout=None, id=None):
if timeout is None:
locked = QtCore.QMutex.tryLock(self)
else:
locked = QtCore.QMutex.tryLock(self, timeout)
if self.debug and locked:
self.l.lock()
try:
if id is None:
self.tb.append(''.join(traceback.format_stack()[:-1]))
else:
self.tb.append(" " + str(id))
#print 'trylock', self, len(self.tb)
finally:
self.l.unlock()
return locked
def lock(self, id=None):
c = 0
waitTime = 5000 # in ms
while True:
if self.tryLock(waitTime, id):
break
c += 1
if self.debug:
self.l.lock()
try:
print("Waiting for mutex lock (%0.1f sec). Traceback follows:"
% (c*waitTime/1000.))
traceback.print_stack()
if len(self.tb) > 0:
print("Mutex is currently locked from:\n")
print(self.tb[-1])
else:
print("Mutex is currently locked from [???]")
finally:
self.l.unlock()
#print 'lock', self, len(self.tb)
def unlock(self):
QtCore.QMutex.unlock(self)
if self.debug:
self.l.lock()
try:
#print 'unlock', self, len(self.tb)
if len(self.tb) > 0:
self.tb.pop()
else:
raise Exception("Attempt to unlock mutex before it has been locked")
finally:
self.l.unlock()
def depth(self):
self.l.lock()
n = len(self.tb)
self.l.unlock()
return n
def traceback(self):
self.l.lock()
try:
ret = self.tb[:]
finally:
self.l.unlock()
return ret
def __exit__(self, *args):
self.unlock()
def __enter__(self):
self.lock()
return self
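# Hedged usage sketch (editor addition): how the Mutex class above is typically
# used, per its docstring -- either via a "with" block or explicit tryLock/unlock.
def _mutex_usage_example():
    m = Mutex(recursive=True)
    with m:  # __enter__ calls lock(), __exit__ calls unlock()
        pass  # critical section goes here
    if m.tryLock(timeout=100):  # non-blocking attempt with a 100 ms timeout
        try:
            pass  # critical section goes here
        finally:
            m.unlock()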
| 31.255319
| 88
| 0.491491
|
4a193134a3f42db292126dcf4985e83a0cb7c180
| 129
|
py
|
Python
|
src/lesson_language_tools/dis_constant_folding.py
|
jasonwee/asus-rt-n14uhp-mrtg
|
4fa96c3406e32ea6631ce447db6d19d70b2cd061
|
[
"Apache-2.0"
] | 3
|
2018-08-14T09:33:52.000Z
|
2022-03-21T12:31:58.000Z
|
src/lesson_language_tools/dis_constant_folding.py
|
jasonwee/asus-rt-n14uhp-mrtg
|
4fa96c3406e32ea6631ce447db6d19d70b2cd061
|
[
"Apache-2.0"
] | null | null | null |
src/lesson_language_tools/dis_constant_folding.py
|
jasonwee/asus-rt-n14uhp-mrtg
|
4fa96c3406e32ea6631ce447db6d19d70b2cd061
|
[
"Apache-2.0"
] | null | null | null |
# Folded
i = 1 + 2
f = 3.4 * 5.6
s = 'Hello,' + ' World!'
# Not folded
I = i * 3 * 4
F = f / 2 / 3
S = s + '\n' + 'Fantastic!'
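# Hedged sketch (editor addition): one way to observe the folding above is to
# disassemble equivalent source strings with the standard-library dis module.
# On CPython, the folded expression typically loads a single precomputed
# constant, while the unfolded one loads the name and multiplies at run time.
import dis
dis.dis("i = 1 + 2")      # typically a single LOAD_CONST of 3
dis.dis("I = i * 3 * 4")  # LOAD_NAME i followed by run-time multiplications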
| 11.727273
| 27
| 0.418605
|
4a19321fda065d8068d96856531af52c42d555f9
| 3,006
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/graph_objs/layout/__init__.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/graph_objs/layout/__init__.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/graph_objs/layout/__init__.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import sys
if sys.version_info < (3, 7):
from ._activeshape import Activeshape
from ._angularaxis import AngularAxis
from ._annotation import Annotation
from ._coloraxis import Coloraxis
from ._colorscale import Colorscale
from ._font import Font
from ._geo import Geo
from ._grid import Grid
from ._hoverlabel import Hoverlabel
from ._image import Image
from ._legend import Legend
from ._mapbox import Mapbox
from ._margin import Margin
from ._modebar import Modebar
from ._newshape import Newshape
from ._polar import Polar
from ._radialaxis import RadialAxis
from ._scene import Scene
from ._shape import Shape
from ._slider import Slider
from ._template import Template
from ._ternary import Ternary
from ._title import Title
from ._transition import Transition
from ._uniformtext import Uniformtext
from ._updatemenu import Updatemenu
from ._xaxis import XAxis
from ._yaxis import YAxis
from . import annotation
from . import coloraxis
from . import geo
from . import grid
from . import hoverlabel
from . import legend
from . import mapbox
from . import newshape
from . import polar
from . import scene
from . import shape
from . import slider
from . import template
from . import ternary
from . import title
from . import updatemenu
from . import xaxis
from . import yaxis
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[
".annotation",
".coloraxis",
".geo",
".grid",
".hoverlabel",
".legend",
".mapbox",
".newshape",
".polar",
".scene",
".shape",
".slider",
".template",
".ternary",
".title",
".updatemenu",
".xaxis",
".yaxis",
],
[
"._activeshape.Activeshape",
"._angularaxis.AngularAxis",
"._annotation.Annotation",
"._coloraxis.Coloraxis",
"._colorscale.Colorscale",
"._font.Font",
"._geo.Geo",
"._grid.Grid",
"._hoverlabel.Hoverlabel",
"._image.Image",
"._legend.Legend",
"._mapbox.Mapbox",
"._margin.Margin",
"._modebar.Modebar",
"._newshape.Newshape",
"._polar.Polar",
"._radialaxis.RadialAxis",
"._scene.Scene",
"._shape.Shape",
"._slider.Slider",
"._template.Template",
"._ternary.Ternary",
"._title.Title",
"._transition.Transition",
"._uniformtext.Uniformtext",
"._updatemenu.Updatemenu",
"._xaxis.XAxis",
"._yaxis.YAxis",
],
)
| 28.358491
| 55
| 0.560546
|
4a19323113ce9d7a6cd11ca8aa76ae0cd7e5f908
| 50
|
py
|
Python
|
AFluentPython/tools/__init__.py
|
RoboCuper-hujinlei/AI-Job
|
cf7a081d59700d75f0f2dc73b6d5130863796f01
|
[
"Apache-2.0"
] | null | null | null |
AFluentPython/tools/__init__.py
|
RoboCuper-hujinlei/AI-Job
|
cf7a081d59700d75f0f2dc73b6d5130863796f01
|
[
"Apache-2.0"
] | null | null | null |
AFluentPython/tools/__init__.py
|
RoboCuper-hujinlei/AI-Job
|
cf7a081d59700d75f0f2dc73b6d5130863796f01
|
[
"Apache-2.0"
] | null | null | null |
"""
介绍python常用的内建模块
"""
print("this is __init__")
| 10
| 25
| 0.68
|
4a193565211429ecf40cc8b9f3eece94bbe2e516
| 1,455
|
py
|
Python
|
tables/utilities.py
|
seanbreckenridge/rotten_tomatoes_cli
|
e250ad0d8bc70bf385540ab9264c440feb90d02c
|
[
"MIT"
] | null | null | null |
tables/utilities.py
|
seanbreckenridge/rotten_tomatoes_cli
|
e250ad0d8bc70bf385540ab9264c440feb90d02c
|
[
"MIT"
] | null | null | null |
tables/utilities.py
|
seanbreckenridge/rotten_tomatoes_cli
|
e250ad0d8bc70bf385540ab9264c440feb90d02c
|
[
"MIT"
] | null | null | null |
import re
from termcolor import colored
class RottenTomatoesScoreFormatter:
def __init__(self):
pass
def format(self, rating):
if rating is None:
return "N/A"
return colored(text="{rating}% ".format(rating=rating), color=self.rating_color(rating=rating))
def rating_color(self, rating):
if rating < 25:
return "cyan"
elif rating < 50:
return "green"
elif rating < 75:
return "white"
elif rating < 90:
return "yellow"
else:
return "red"
class MpaaRatingFormatter:
def __init__(self):
pass
def format(self, rating):
return colored(text=rating, color=self.rating_color(rating=rating))
def rating_color(self, rating):
if rating == "NR":
return "white"
elif rating == "G":
return "green"
elif rating == "PG":
return "cyan"
elif rating == "PG13":
return "yellow"
elif rating == "R":
return "red"
else:
return "magenta"
def convert_to_ascii(text):
return text.encode("ascii", "ignore").decode("ascii")
def clean_html(raw_html):
clean = re.compile("<.*?>")
clean_text = re.sub(clean, "", raw_html)
return clean_text
def formatted_header(text):
return colored(text=convert_to_ascii(text=text), attrs=["bold", "underline"])
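# Hedged usage sketch (editor addition): how these helpers might be combined;
# the sample rating values below are illustrative only.
if __name__ == "__main__":
    print(formatted_header("Tomatometer"))
    print(RottenTomatoesScoreFormatter().format(87))  # 75 <= 87 < 90, so rendered yellow
    print(MpaaRatingFormatter().format("PG13"))       # rendered yellow per rating_color above
    print(clean_html("<b>Certified Fresh</b>"))       # tags stripped -> "Certified Fresh"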
| 20.208333
| 103
| 0.564948
|
4a1935f66a581ad2a2efdc8fce23ca4a9bf5f6c1
| 76,614
|
py
|
Python
|
sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze.py
|
jalauzon-msft/azure-sdk-for-python
|
15967f5c6d3376f2334a382486ba86339786e028
|
[
"MIT"
] | null | null | null |
sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze.py
|
jalauzon-msft/azure-sdk-for-python
|
15967f5c6d3376f2334a382486ba86339786e028
|
[
"MIT"
] | null | null | null |
sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze.py
|
jalauzon-msft/azure-sdk-for-python
|
15967f5c6d3376f2334a382486ba86339786e028
|
[
"MIT"
] | null | null | null |
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from collections import defaultdict
import os
import pytest
import functools
import itertools
import datetime
import json
from unittest import mock
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from testcase import TextAnalyticsTest, TextAnalyticsPreparer, is_public_cloud
from testcase import TextAnalyticsClientPreparer as _TextAnalyticsClientPreparer
from devtools_testutils import recorded_by_proxy, set_bodiless_matcher
from azure.ai.textanalytics import (
TextAnalyticsClient,
RecognizeEntitiesAction,
RecognizeLinkedEntitiesAction,
RecognizePiiEntitiesAction,
ExtractKeyPhrasesAction,
AnalyzeSentimentAction,
TextDocumentInput,
VERSION,
TextAnalyticsApiVersion,
_AnalyzeActionsType,
ExtractKeyPhrasesResult,
AnalyzeSentimentResult,
RecognizeLinkedEntitiesResult,
RecognizeEntitiesResult,
RecognizePiiEntitiesResult,
PiiEntityCategory,
SingleCategoryClassifyAction,
MultiCategoryClassifyAction,
RecognizeCustomEntitiesAction,
SingleCategoryClassifyResult,
MultiCategoryClassifyResult,
RecognizeCustomEntitiesResult,
AnalyzeHealthcareEntitiesAction
)
# pre-apply the client_cls positional argument so it needn't be explicitly passed below
TextAnalyticsClientPreparer = functools.partial(_TextAnalyticsClientPreparer, TextAnalyticsClient)
TextAnalyticsCustomPreparer = functools.partial(
TextAnalyticsPreparer,
textanalytics_custom_text_endpoint="https://fakeendpoint.cognitiveservices.azure.com",
textanalytics_custom_text_key="fakeZmFrZV9hY29jdW50X2tleQ==",
textanalytics_single_category_classify_project_name="single_category_classify_project_name",
textanalytics_single_category_classify_deployment_name="single_category_classify_deployment_name",
textanalytics_multi_category_classify_project_name="multi_category_classify_project_name",
textanalytics_multi_category_classify_deployment_name="multi_category_classify_deployment_name",
textanalytics_custom_entities_project_name="custom_entities_project_name",
textanalytics_custom_entities_deployment_name="custom_entities_deployment_name",
)
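# Hedged sketch (editor addition, not part of the SDK test suite): the minimal
# begin_analyze_actions call pattern exercised throughout the tests below. The
# endpoint and key values are placeholders, not real credentials.
def _example_begin_analyze_actions(
    endpoint="https://<your-resource>.cognitiveservices.azure.com",
    key="<your-key>",
):
    client = TextAnalyticsClient(endpoint, AzureKeyCredential(key))
    poller = client.begin_analyze_actions(
        ["Microsoft was founded by Bill Gates and Paul Allen"],
        actions=[RecognizeEntitiesAction(), ExtractKeyPhrasesAction()],
    )
    # result() yields one list of action results per input document
    return [list(document_results) for document_results in poller.result()]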
class TestAnalyze(TextAnalyticsTest):
def _interval(self):
return 5 if self.is_live else 0
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_no_single_input(self, **kwargs):
client = kwargs.pop("client")
with pytest.raises(TypeError):
response = client.begin_analyze_actions("hello world", actions=[], polling_interval=self._interval())
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_all_successful_passing_dict_key_phrase_task(self, client):
docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen"},
{"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
response = client.begin_analyze_actions(
docs,
actions=[ExtractKeyPhrasesAction()],
show_stats=True,
polling_interval=self._interval(),
).result()
document_results = list(response)
assert len(document_results) == 2
for document_result in document_results:
assert len(document_result) == 1
for document_result in document_result:
assert isinstance(document_result, ExtractKeyPhrasesResult)
assert "Paul Allen" in document_result.key_phrases
assert "Bill Gates" in document_result.key_phrases
assert "Microsoft" in document_result.key_phrases
assert document_result.id is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_all_successful_passing_dict_sentiment_task(self, client):
docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
{"id": "2", "language": "en", "text": "I did not like the hotel we stayed at. It was too expensive."},
{"id": "3", "language": "en", "text": "The restaurant had really good food. I recommend you try it."}]
response = client.begin_analyze_actions(
docs,
actions=[AnalyzeSentimentAction()],
show_stats=True,
polling_interval=self._interval(),
).result()
pages = list(response)
assert len(pages) == len(docs)
for idx, document_results in enumerate(pages):
assert len(document_results) == 1
document_result = document_results[0]
assert isinstance(document_result, AnalyzeSentimentResult)
assert document_result.id is not None
assert document_result.statistics is not None
self.validateConfidenceScores(document_result.confidence_scores)
assert document_result.sentences is not None
if idx == 0:
assert document_result.sentiment == "neutral"
assert len(document_result.sentences) == 1
assert document_result.sentences[0].text == "Microsoft was founded by Bill Gates and Paul Allen."
elif idx == 1:
assert document_result.sentiment == "negative"
assert len(document_result.sentences) == 2
assert document_result.sentences[0].text == "I did not like the hotel we stayed at."
assert document_result.sentences[1].text == "It was too expensive."
else:
assert document_result.sentiment == "positive"
assert len(document_result.sentences) == 2
assert document_result.sentences[0].text == "The restaurant had really good food."
assert document_result.sentences[1].text == "I recommend you try it."
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_sentiment_analysis_task_with_opinion_mining(self, client):
documents = [
"It has a sleek premium aluminum design that makes it beautiful to look at.",
"The food and service is not good"
]
response = client.begin_analyze_actions(
documents,
actions=[AnalyzeSentimentAction(show_opinion_mining=True)],
show_stats=True,
polling_interval=self._interval(),
).result()
pages = list(response)
assert len(pages) == len(documents)
for idx, document_results in enumerate(pages):
assert len(document_results) == 1
document_result = document_results[0]
assert isinstance(document_result, AnalyzeSentimentResult)
for sentence in document_result.sentences:
if idx == 0:
for mined_opinion in sentence.mined_opinions:
target = mined_opinion.target
assert 'design' == target.text
assert 'positive' == target.sentiment
assert 0.0 == target.confidence_scores.neutral
self.validateConfidenceScores(target.confidence_scores)
assert 32 == target.offset
sleek_opinion = mined_opinion.assessments[0]
assert 'sleek' == sleek_opinion.text
assert 'positive' == sleek_opinion.sentiment
assert 0.0 == sleek_opinion.confidence_scores.neutral
self.validateConfidenceScores(sleek_opinion.confidence_scores)
assert 9 == sleek_opinion.offset
assert not sleek_opinion.is_negated
premium_opinion = mined_opinion.assessments[1]
assert 'premium' == premium_opinion.text
assert 'positive' == premium_opinion.sentiment
assert 0.0 == premium_opinion.confidence_scores.neutral
self.validateConfidenceScores(premium_opinion.confidence_scores)
assert 15 == premium_opinion.offset
assert not premium_opinion.is_negated
else:
food_target = sentence.mined_opinions[0].target
service_target = sentence.mined_opinions[1].target
self.validateConfidenceScores(food_target.confidence_scores)
assert 4 == food_target.offset
assert 'service' == service_target.text
assert 'negative' == service_target.sentiment
assert 0.0 == service_target.confidence_scores.neutral
self.validateConfidenceScores(service_target.confidence_scores)
assert 13 == service_target.offset
food_opinion = sentence.mined_opinions[0].assessments[0]
service_opinion = sentence.mined_opinions[1].assessments[0]
self.assertOpinionsEqual(food_opinion, service_opinion)
assert 'good' == food_opinion.text
assert 'negative' == food_opinion.sentiment
assert 0.0 == food_opinion.confidence_scores.neutral
self.validateConfidenceScores(food_opinion.confidence_scores)
assert 28 == food_opinion.offset
assert food_opinion.is_negated
service_target = sentence.mined_opinions[1].target
assert 'food' == food_target.text
assert 'negative' == food_target.sentiment
assert 0.0 == food_target.confidence_scores.neutral
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_all_successful_passing_text_document_input_entities_task(self, client):
docs = [
TextDocumentInput(id="1", text="Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975", language="en"),
TextDocumentInput(id="2", text="Microsoft fue fundado por Bill Gates y Paul Allen el 4 de abril de 1975.", language="es"),
TextDocumentInput(id="3", text="Microsoft wurde am 4. April 1975 von Bill Gates und Paul Allen gegründet.", language="de"),
]
response = client.begin_analyze_actions(
docs,
actions=[RecognizeEntitiesAction()],
show_stats=True,
polling_interval=self._interval(),
).result()
pages = list(response)
assert len(pages) == len(docs)
for document_results in pages:
assert len(document_results) == 1
document_result = document_results[0]
assert isinstance(document_result, RecognizeEntitiesResult)
assert len(document_result.entities) == 4
assert document_result.id is not None
for entity in document_result.entities:
assert entity.text is not None
assert entity.category is not None
assert entity.offset is not None
assert entity.confidence_score is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_all_successful_passing_string_pii_entities_task(self, client):
docs = ["My SSN is 859-98-0987.",
"Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check.",
"Is 998.214.865-68 your Brazilian CPF number?"
]
response = client.begin_analyze_actions(
docs,
actions=[RecognizePiiEntitiesAction()],
show_stats=True,
polling_interval=self._interval(),
).result()
pages = list(response)
assert len(pages) == len(docs)
for idx, document_results in enumerate(pages):
assert len(document_results) == 1
document_result = document_results[0]
assert isinstance(document_result, RecognizePiiEntitiesResult)
if idx == 0:
assert document_result.entities[0].text == "859-98-0987"
assert document_result.entities[0].category == "USSocialSecurityNumber"
elif idx == 1:
assert document_result.entities[0].text == "111000025"
for entity in document_result.entities:
assert entity.text is not None
assert entity.category is not None
assert entity.offset is not None
assert entity.confidence_score is not None
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_bad_request_on_empty_document(self, client):
docs = [""]
with pytest.raises(HttpResponseError):
response = client.begin_analyze_actions(
docs,
actions=[ExtractKeyPhrasesAction()],
polling_interval=self._interval(),
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={
"textanalytics_test_api_key": "",
})
@recorded_by_proxy
def test_empty_credential_class(self, client):
with pytest.raises(ClientAuthenticationError):
response = client.begin_analyze_actions(
["This is written in English."],
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction(),
],
polling_interval=self._interval(),
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={
"textanalytics_test_api_key": "xxxxxxxxxxxx",
})
@recorded_by_proxy
def test_bad_credentials(self, client):
with pytest.raises(ClientAuthenticationError):
response = client.begin_analyze_actions(
["This is written in English."],
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction(),
],
polling_interval=self._interval(),
)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_out_of_order_ids_multiple_tasks(self, client):
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction(),
],
polling_interval=self._interval(),
).result()
results = list(response)
assert len(results) == len(docs)
document_order = ["56", "0", "19", "1"]
action_order = [
_AnalyzeActionsType.RECOGNIZE_ENTITIES,
_AnalyzeActionsType.EXTRACT_KEY_PHRASES,
_AnalyzeActionsType.RECOGNIZE_PII_ENTITIES,
_AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES,
_AnalyzeActionsType.ANALYZE_SENTIMENT,
]
for doc_idx, document_results in enumerate(results):
assert len(document_results) == 5
for action_idx, document_result in enumerate(document_results):
assert document_result.id == document_order[doc_idx]
assert self.document_result_to_action_type(document_result) == action_order[action_idx]
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": "v3.1"})
@recorded_by_proxy
def test_show_stats_and_model_version_multiple_tasks_v3_1(self, client):
def callback(resp):
assert resp.raw_response
tasks = resp.raw_response['tasks']
assert tasks['completed'] == 5
assert tasks['inProgress'] == 0
assert tasks['failed'] == 0
assert tasks['total'] == 5
num_tasks = 0
for key, task in tasks.items():
if "Tasks" in key:
num_tasks += 1
assert len(task) == 1
task_stats = task[0]['results']['statistics']
assert task_stats['documentsCount'] == 4
assert task_stats['validDocumentsCount'] == 4
assert task_stats['erroneousDocumentsCount'] == 0
assert task_stats['transactionsCount'] == 4
assert num_tasks == 5
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
poller = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(model_version="latest"),
ExtractKeyPhrasesAction(model_version="latest"),
RecognizePiiEntitiesAction(model_version="latest"),
RecognizeLinkedEntitiesAction(model_version="latest"),
AnalyzeSentimentAction(model_version="latest"),
],
show_stats=True,
polling_interval=self._interval(),
raw_response_hook=callback,
)
response = poller.result()
pages = list(response)
assert len(pages) == len(docs)
action_order = [
_AnalyzeActionsType.RECOGNIZE_ENTITIES,
_AnalyzeActionsType.EXTRACT_KEY_PHRASES,
_AnalyzeActionsType.RECOGNIZE_PII_ENTITIES,
_AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES,
_AnalyzeActionsType.ANALYZE_SENTIMENT,
]
for document_results in pages:
assert len(document_results) == len(action_order)
for document_result in document_results:
assert document_result.statistics
assert document_result.statistics.character_count
assert document_result.statistics.transaction_count
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_show_stats_and_model_version_multiple_tasks(self, client):
def callback(resp):
assert resp.raw_response
tasks = resp.raw_response['tasks']
assert tasks['completed'] == 5
assert tasks['inProgress'] == 0
assert tasks['failed'] == 0
assert tasks['total'] == 5
num_tasks = 0
for task in tasks["items"]:
num_tasks += 1
task_stats = task['results']['statistics']
assert task_stats['documentsCount'] == 4
assert task_stats['validDocumentsCount'] == 4
assert task_stats['erroneousDocumentsCount'] == 0
assert task_stats['transactionsCount'] == 4
assert num_tasks == 5
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
poller = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(model_version="latest"),
ExtractKeyPhrasesAction(model_version="latest"),
RecognizePiiEntitiesAction(model_version="latest"),
RecognizeLinkedEntitiesAction(model_version="latest"),
AnalyzeSentimentAction(model_version="latest"),
],
show_stats=True,
polling_interval=self._interval(),
raw_response_hook=callback,
)
response = poller.result()
pages = list(response)
assert len(pages) == len(docs)
action_order = [
_AnalyzeActionsType.RECOGNIZE_ENTITIES,
_AnalyzeActionsType.EXTRACT_KEY_PHRASES,
_AnalyzeActionsType.RECOGNIZE_PII_ENTITIES,
_AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES,
_AnalyzeActionsType.ANALYZE_SENTIMENT,
]
for document_results in pages:
assert len(document_results) == len(action_order)
for document_result in document_results:
assert document_result.statistics
assert document_result.statistics.character_count
assert document_result.statistics.transaction_count
@pytest.mark.skip("code changes needed before we can run test")
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_poller_metadata(self, client):
docs = [{"id": "56", "text": ":)"}]
poller = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(model_version="latest")
],
show_stats=True,
polling_interval=self._interval(),
)
poller.result()
assert isinstance(poller.created_on, datetime.datetime)
assert not poller.display_name
assert isinstance(poller.expires_on, datetime.datetime)
assert poller.actions_failed_count == 0
assert poller.actions_in_progress_count == 0
assert poller.actions_succeeded_count == 1
assert isinstance(poller.last_modified_on, datetime.datetime)
assert poller.total_actions_count == 1
assert poller.id
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_invalid_language_hint_method(self, client):
response = list(client.begin_analyze_actions(
["This should fail because we're passing in an invalid language hint"],
language="notalanguage",
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction(),
],
polling_interval=self._interval(),
).result())
for document_results in response:
for doc in document_results:
assert doc.is_error
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_bad_model_version_error_multiple_tasks(self, client):
docs = [{"id": "1", "language": "en", "text": "I did not like the hotel we stayed at."}]
with pytest.raises(HttpResponseError):
res = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(model_version="latest"),
ExtractKeyPhrasesAction(model_version="bad"),
RecognizePiiEntitiesAction(model_version="bad"),
RecognizeLinkedEntitiesAction(model_version="bad"),
AnalyzeSentimentAction(model_version="bad"),
],
polling_interval=self._interval(),
).result()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_bad_model_version_error_all_tasks(self, client): # TODO: verify behavior of service
docs = [{"id": "1", "language": "en", "text": "I did not like the hotel we stayed at."}]
with pytest.raises(HttpResponseError):
client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(model_version="bad"),
ExtractKeyPhrasesAction(model_version="bad"),
RecognizePiiEntitiesAction(model_version="bad"),
RecognizeLinkedEntitiesAction(model_version="bad"),
AnalyzeSentimentAction(model_version="bad"),
],
polling_interval=self._interval(),
).result()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_missing_input_records_error(self, **kwargs):
client = kwargs.pop("client")
docs = []
with pytest.raises(ValueError) as excinfo:
client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction(),
],
polling_interval=self._interval(),
)
assert "Input documents can not be empty or None" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_passing_none_docs(self, **kwargs):
client = kwargs.pop("client")
with pytest.raises(ValueError) as excinfo:
client.begin_analyze_actions(None, None)
assert "Input documents can not be empty or None" in str(excinfo.value)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_pass_cls(self, client):
def callback(pipeline_response, deserialized, _):
return "cls result"
res = client.begin_analyze_actions(
documents=["Test passing cls to endpoint"],
actions=[
RecognizeEntitiesAction(),
],
cls=callback,
polling_interval=self._interval(),
).result()
assert res == "cls result"
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_multiple_pages_of_results_returned_successfully(self, client):
single_doc = "hello world"
docs = [{"id": str(idx), "text": val} for (idx, val) in enumerate(list(itertools.repeat(single_doc, 25)))] # max number of documents is 25
result = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction(),
],
show_stats=True,
polling_interval=self._interval(),
).result()
pages = list(result)
assert len(pages) == len(docs)
action_order = [
_AnalyzeActionsType.RECOGNIZE_ENTITIES,
_AnalyzeActionsType.EXTRACT_KEY_PHRASES,
_AnalyzeActionsType.RECOGNIZE_PII_ENTITIES,
_AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES,
_AnalyzeActionsType.ANALYZE_SENTIMENT,
]
action_type_to_document_results = defaultdict(list)
for doc_idx, page in enumerate(pages):
for action_idx, document_result in enumerate(page):
assert document_result.id == str(doc_idx)
action_type = self.document_result_to_action_type(document_result)
assert action_type == action_order[action_idx]
action_type_to_document_results[action_type].append(document_result)
assert len(action_type_to_document_results) == len(action_order)
for document_results in action_type_to_document_results.values():
assert len(document_results) == len(docs)
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_too_many_documents(self, client):
docs = list(itertools.repeat("input document", 26)) # Maximum number of documents per request is 25
with pytest.raises(HttpResponseError) as excinfo:
client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction(),
],
polling_interval=self._interval(),
)
assert excinfo.value.status_code == 400
@pytest.mark.skipif(not is_public_cloud(), reason='Usgov and China Cloud are not supported')
@TextAnalyticsCustomPreparer()
@recorded_by_proxy
def test_disable_service_logs(
self,
textanalytics_custom_text_endpoint,
textanalytics_custom_text_key,
textanalytics_single_category_classify_project_name,
textanalytics_single_category_classify_deployment_name,
textanalytics_multi_category_classify_project_name,
textanalytics_multi_category_classify_deployment_name,
textanalytics_custom_entities_project_name,
textanalytics_custom_entities_deployment_name
):
set_bodiless_matcher() # don't match on body for this test since we scrub the proj/deployment values
client = TextAnalyticsClient(textanalytics_custom_text_endpoint, AzureKeyCredential(textanalytics_custom_text_key))
actions = [
RecognizeEntitiesAction(disable_service_logs=True),
ExtractKeyPhrasesAction(disable_service_logs=True),
RecognizePiiEntitiesAction(disable_service_logs=True),
RecognizeLinkedEntitiesAction(disable_service_logs=True),
AnalyzeSentimentAction(disable_service_logs=True),
SingleCategoryClassifyAction(
project_name=textanalytics_single_category_classify_project_name,
deployment_name=textanalytics_single_category_classify_deployment_name,
disable_service_logs=True
),
MultiCategoryClassifyAction(
project_name=textanalytics_multi_category_classify_project_name,
deployment_name=textanalytics_multi_category_classify_deployment_name,
disable_service_logs=True
),
RecognizeCustomEntitiesAction(
project_name=textanalytics_custom_entities_project_name,
deployment_name=textanalytics_custom_entities_deployment_name,
disable_service_logs=True
),
AnalyzeHealthcareEntitiesAction(disable_service_logs=True)
]
for action in actions:
assert action.disable_service_logs
def callback(resp):
tasks = json.loads(resp.http_request.body)["tasks"]
assert len(tasks) == len(actions)
for task in tasks.values():
assert task[0]["parameters"]["loggingOptOut"]
client.begin_analyze_actions(
documents=["Test for logging disable"],
actions=actions,
polling_interval=self._interval(),
raw_response_hook=callback,
).result()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_pii_action_categories_filter(self, client):
docs = [{"id": "1", "text": "My SSN is 859-98-0987."},
{"id": "2",
"text": "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check."},
{"id": "3", "text": "Is 998.214.865-68 your Brazilian CPF number?"}]
actions = [
RecognizePiiEntitiesAction(
categories_filter=[
PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER,
PiiEntityCategory.ABA_ROUTING_NUMBER,
]
),
]
result = client.begin_analyze_actions(documents=docs, actions=actions, polling_interval=self._interval()).result()
action_results = list(result)
assert len(action_results) == 3
assert action_results[0][0].entities[0].text == "859-98-0987"
assert action_results[0][0].entities[0].category == PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER
assert action_results[1][0].entities[0].text == "111000025"
assert action_results[1][0].entities[0].category == PiiEntityCategory.ABA_ROUTING_NUMBER
assert action_results[2][0].entities == [] # No Brazilian CPF since not in categories_filter
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_partial_success_for_actions(self, client):
docs = [{"id": "1", "language": "tr", "text": "I did not like the hotel we stayed at."},
{"id": "2", "language": "en", "text": "I did not like the hotel we stayed at."}]
response = client.begin_analyze_actions(
docs,
actions=[
AnalyzeSentimentAction(),
RecognizePiiEntitiesAction(),
],
polling_interval=self._interval(),
).result()
action_results = list(response)
assert len(action_results) == len(docs)
action_order = [
_AnalyzeActionsType.ANALYZE_SENTIMENT,
_AnalyzeActionsType.RECOGNIZE_PII_ENTITIES,
]
assert len(action_results[0]) == len(action_order)
assert len(action_results[1]) == len(action_order)
# first doc
assert isinstance(action_results[0][0], AnalyzeSentimentResult)
assert action_results[0][0].id == "1"
assert action_results[0][1].is_error
assert action_results[0][1].id == "1"
# second doc
assert isinstance(action_results[1][0], AnalyzeSentimentResult)
assert action_results[1][0].id == "2"
assert isinstance(action_results[1][1], RecognizePiiEntitiesResult)
assert action_results[1][1].id == "2"
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_multiple_of_same_action(self, client):
docs = [
{"id": "28", "text": "My SSN is 859-98-0987. Here is another sentence."},
{"id": "3", "text": "Is 998.214.865-68 your Brazilian CPF number? Here is another sentence."},
{"id": "5", "language": "en", "text": "A recent report by the Government Accountability Office (GAO) found that the dramatic increase in oil and natural gas development on federal lands over the past six years has stretched the staff of the BLM to a point that it has been unable to meet its environmental protection responsibilities."},
]
actions = [
AnalyzeSentimentAction(),
RecognizePiiEntitiesAction(),
RecognizeEntitiesAction(),
RecognizeLinkedEntitiesAction(),
RecognizePiiEntitiesAction(categories_filter=[PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER]),
ExtractKeyPhrasesAction(),
RecognizeEntitiesAction(),
AnalyzeSentimentAction(show_opinion_mining=True),
RecognizeLinkedEntitiesAction(),
ExtractKeyPhrasesAction(),
]
response = client.begin_analyze_actions(
docs,
actions=actions,
polling_interval=self._interval(),
).result()
action_results = list(response)
assert len(action_results) == len(docs)
assert len(action_results[0]) == len(actions)
assert len(action_results[1]) == len(actions)
assert len(action_results[2]) == len(actions)
for idx, action_result in enumerate(action_results):
if idx == 0:
doc_id = "28"
elif idx == 1:
doc_id = "3"
else:
doc_id = "5"
assert isinstance(action_result[0], AnalyzeSentimentResult)
assert not all([sentence.mined_opinions for sentence in action_result[0].sentences])
assert action_result[0].id == doc_id
assert isinstance(action_result[1], RecognizePiiEntitiesResult)
assert action_result[1].id == doc_id
assert isinstance(action_result[2], RecognizeEntitiesResult)
assert action_result[2].id == doc_id
assert isinstance(action_result[3], RecognizeLinkedEntitiesResult)
assert action_result[3].id == doc_id
assert isinstance(action_result[4], RecognizePiiEntitiesResult)
assert action_result[4].id == doc_id
if doc_id == "28":
assert action_result[4].entities
else:
assert not action_result[4].entities
assert isinstance(action_result[5], ExtractKeyPhrasesResult)
assert action_result[5].id == doc_id
assert isinstance(action_result[6], RecognizeEntitiesResult)
assert action_result[6].id == doc_id
assert isinstance(action_result[7], AnalyzeSentimentResult)
assert [sentence.mined_opinions for sentence in action_result[0].sentences]
assert action_result[7].id == doc_id
assert isinstance(action_result[8], RecognizeLinkedEntitiesResult)
assert action_result[8].id == doc_id
assert isinstance(action_result[9], ExtractKeyPhrasesResult)
assert action_result[9].id == doc_id
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_multiple_of_same_action_with_partial_results(self, client):
docs = [{"id": "5", "language": "en", "text": "A recent report by the Government Accountability Office (GAO) found that the dramatic increase in oil and natural gas development on federal lands over the past six years has stretched the staff of the BLM to a point that it has been unable to meet its environmental protection responsibilities."},
{"id": "2", "text": ""}]
actions = [
RecognizeEntitiesAction(),
RecognizePiiEntitiesAction(),
RecognizeEntitiesAction(disable_service_logs=True)
]
response = client.begin_analyze_actions(
docs,
actions=actions,
polling_interval=self._interval(),
).result()
action_results = list(response)
assert len(action_results) == len(docs)
assert len(action_results[0]) == len(actions)
assert len(action_results[1]) == len(actions)
# first doc
assert isinstance(action_results[0][0], RecognizeEntitiesResult)
assert action_results[0][0].id == "5"
assert isinstance(action_results[0][1], RecognizePiiEntitiesResult)
assert action_results[0][1].id == "5"
assert isinstance(action_results[0][2], RecognizeEntitiesResult)
assert action_results[0][2].id == "5"
# second doc
assert action_results[1][0].is_error
assert action_results[1][1].is_error
assert action_results[1][2].is_error
@pytest.mark.skip("code changes needed before we can run test")
@pytest.mark.skipif(not is_public_cloud(), reason='Usgov and China Cloud are not supported')
@TextAnalyticsCustomPreparer()
@recorded_by_proxy
def test_single_category_classify(
self,
textanalytics_custom_text_endpoint,
textanalytics_custom_text_key,
textanalytics_single_category_classify_project_name,
textanalytics_single_category_classify_deployment_name
):
set_bodiless_matcher() # don't match on body for this test since we scrub the proj/deployment values
client = TextAnalyticsClient(textanalytics_custom_text_endpoint, AzureKeyCredential(textanalytics_custom_text_key))
docs = [
{"id": "1", "language": "en", "text": "A recent report by the Government Accountability Office (GAO) found that the dramatic increase in oil and natural gas development on federal lands over the past six years has stretched the staff of the BLM to a point that it has been unable to meet its environmental protection responsibilities."},
{"id": "2", "language": "en", "text": "David Schmidt, senior vice president--Food Safety, International Food Information Council (IFIC), Washington, D.C., discussed the physical activity component."},
{"id": "3", "language": "en", "text": "I need a reservation for an indoor restaurant in China. Please don't stop the music. Play music and add it to my playlist"},
]
response = client.begin_analyze_actions(
docs,
actions=[
SingleCategoryClassifyAction(
project_name=textanalytics_single_category_classify_project_name,
deployment_name=textanalytics_single_category_classify_deployment_name
)
],
show_stats=True,
polling_interval=self._interval(),
).result()
document_results = list(response)
for doc_result in document_results:
for result in doc_result:
assert result.id
assert not result.is_error
assert not result.warnings
assert result.statistics
assert result.classification.category
assert result.classification.confidence_score
@pytest.mark.skipif(not is_public_cloud(), reason='Usgov and China Cloud are not supported')
@TextAnalyticsCustomPreparer()
@recorded_by_proxy
def test_multi_category_classify(
self,
textanalytics_custom_text_endpoint,
textanalytics_custom_text_key,
textanalytics_multi_category_classify_project_name,
textanalytics_multi_category_classify_deployment_name
):
set_bodiless_matcher() # don't match on body for this test since we scrub the proj/deployment values
client = TextAnalyticsClient(textanalytics_custom_text_endpoint, AzureKeyCredential(textanalytics_custom_text_key))
docs = [
{"id": "1", "language": "en", "text": "A recent report by the Government Accountability Office (GAO) found that the dramatic increase in oil and natural gas development on federal lands over the past six years has stretched the staff of the BLM to a point that it has been unable to meet its environmental protection responsibilities."},
{"id": "2", "language": "en", "text": "David Schmidt, senior vice president--Food Safety, International Food Information Council (IFIC), Washington, D.C., discussed the physical activity component."},
{"id": "3", "language": "en", "text": "I need a reservation for an indoor restaurant in China. Please don't stop the music. Play music and add it to my playlist"},
]
response = client.begin_analyze_actions(
docs,
actions=[
MultiCategoryClassifyAction(
project_name=textanalytics_multi_category_classify_project_name,
deployment_name=textanalytics_multi_category_classify_deployment_name
)
],
show_stats=True,
polling_interval=self._interval(),
).result()
document_results = list(response)
for doc_result in document_results:
for result in doc_result:
assert result.id
assert not result.is_error
assert not result.warnings
assert result.statistics
for classification in result.classifications:
assert classification.category
assert classification.confidence_score
@pytest.mark.skipif(not is_public_cloud(), reason='Usgov and China Cloud are not supported')
@TextAnalyticsCustomPreparer()
@recorded_by_proxy
def test_recognize_custom_entities(
self,
textanalytics_custom_text_endpoint,
textanalytics_custom_text_key,
textanalytics_custom_entities_project_name,
textanalytics_custom_entities_deployment_name
):
set_bodiless_matcher() # don't match on body for this test since we scrub the proj/deployment values
client = TextAnalyticsClient(textanalytics_custom_text_endpoint, AzureKeyCredential(textanalytics_custom_text_key))
docs = [
{"id": "1", "language": "en", "text": "A recent report by the Government Accountability Office (GAO) found that the dramatic increase in oil and natural gas development on federal lands over the past six years has stretched the staff of the BLM to a point that it has been unable to meet its environmental protection responsibilities."},
{"id": "2", "language": "en", "text": "David Schmidt, senior vice president--Food Safety, International Food Information Council (IFIC), Washington, D.C., discussed the physical activity component."},
{"id": "3", "language": "en", "text": "I need a reservation for an indoor restaurant in China. Please don't stop the music. Play music and add it to my playlist"},
]
response = client.begin_analyze_actions(
docs,
actions=[
RecognizeCustomEntitiesAction(
project_name=textanalytics_custom_entities_project_name,
deployment_name=textanalytics_custom_entities_deployment_name
)
],
show_stats=True,
polling_interval=self._interval(),
).result()
document_results = list(response)
for doc_result in document_results:
for result in doc_result:
assert result.id
assert not result.is_error
assert not result.warnings
assert result.statistics
for entity in result.entities:
assert entity.text
assert entity.category
assert entity.offset is not None
assert entity.length is not None
assert entity.confidence_score is not None
@pytest.mark.skip("code changes needed before we can run test")
@pytest.mark.skipif(not is_public_cloud(), reason='Usgov and China Cloud are not supported')
@TextAnalyticsCustomPreparer()
@recorded_by_proxy
def test_custom_partial_error(
self,
textanalytics_custom_text_endpoint,
textanalytics_custom_text_key,
textanalytics_single_category_classify_project_name,
textanalytics_single_category_classify_deployment_name,
textanalytics_multi_category_classify_project_name,
textanalytics_multi_category_classify_deployment_name,
textanalytics_custom_entities_project_name,
textanalytics_custom_entities_deployment_name
):
set_bodiless_matcher() # don't match on body for this test since we scrub the proj/deployment values
client = TextAnalyticsClient(textanalytics_custom_text_endpoint, AzureKeyCredential(textanalytics_custom_text_key))
docs = [
{"id": "1", "language": "en", "text": "A recent report by the Government Accountability Office (GAO) found that the dramatic increase in oil and natural gas development on federal lands over the past six years has stretched the staff of the BLM to a point that it has been unable to meet its environmental protection responsibilities."},
{"id": "2", "language": "en", "text": ""},
]
response = client.begin_analyze_actions(
docs,
actions=[
SingleCategoryClassifyAction(
project_name=textanalytics_single_category_classify_project_name,
deployment_name=textanalytics_single_category_classify_deployment_name
),
MultiCategoryClassifyAction(
project_name=textanalytics_multi_category_classify_project_name,
deployment_name=textanalytics_multi_category_classify_deployment_name
),
RecognizeCustomEntitiesAction(
project_name=textanalytics_custom_entities_project_name,
deployment_name=textanalytics_custom_entities_deployment_name
)
],
show_stats=True,
polling_interval=self._interval(),
).result()
document_results = list(response)
assert len(document_results) == 2
assert isinstance(document_results[0][0], SingleCategoryClassifyResult)
assert isinstance(document_results[0][1], MultiCategoryClassifyResult)
assert isinstance(document_results[0][2], RecognizeCustomEntitiesResult)
assert document_results[1][0].is_error
assert document_results[1][1].is_error
assert document_results[1][2].is_error
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_analyze_continuation_token(self, client):
docs = [
{"id": "1", "language": "en", "text": "A recent report by the Government Accountability Office (GAO) found that the dramatic increase in oil and natural gas development on federal lands over the past six years has stretched the staff of the BLM to a point that it has been unable to meet its environmental protection responsibilities."},
{"id": "2", "language": "en", "text": "David Schmidt, senior vice president--Food Safety, International Food Information Council (IFIC), Washington, D.C., discussed the physical activity component."},
{"id": "3", "text": ""},
{"id": "4", "language": "en", "text": "I need a reservation for an indoor restaurant in China. Please don't stop the music. Play music and add it to my playlist"},
]
actions = [
RecognizeEntitiesAction(),
RecognizePiiEntitiesAction(),
AnalyzeSentimentAction(),
ExtractKeyPhrasesAction(),
]
initial_poller = client.begin_analyze_actions(
docs,
actions=actions,
show_stats=True,
polling_interval=self._interval(),
)
cont_token = initial_poller.continuation_token()
poller = client.begin_analyze_actions(
None,
None,
continuation_token=cont_token,
polling_interval=self._interval(),
)
response = poller.result()
action_results = list(response)
assert len(action_results) == len(docs)
action_order = [
_AnalyzeActionsType.RECOGNIZE_ENTITIES,
_AnalyzeActionsType.RECOGNIZE_PII_ENTITIES,
_AnalyzeActionsType.ANALYZE_SENTIMENT,
_AnalyzeActionsType.EXTRACT_KEY_PHRASES,
]
document_order = ["1", "2", "3", "4"]
for doc_idx, document_results in enumerate(action_results):
assert len(document_results) == 4
for action_idx, document_result in enumerate(document_results):
if doc_idx == 2:
assert document_result.id == document_order[doc_idx]
assert document_result.is_error
else:
assert document_result.id == document_order[doc_idx]
assert document_result.statistics
assert self.document_result_to_action_type(document_result) == action_order[action_idx]
initial_poller.wait() # necessary so azure-devtools doesn't throw assertion error
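# Editor's note: a minimal sketch of the resume pattern the test above exercises,
# kept as a comment so it stays out of the test run. `client` and `docs` are
# assumptions; only continuation_token() and the (None, None, continuation_token=...)
# call shape come from the test itself.
#
#     poller = client.begin_analyze_actions(docs, actions=[RecognizeEntitiesAction()])
#     token = poller.continuation_token()  # persist this string, e.g. in a job table
#     resumed = client.begin_analyze_actions(None, None, continuation_token=token)
#     per_document_results = list(resumed.result())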
@TextAnalyticsPreparer()
def test_generic_action_error_no_target_v3_1(
self,
**kwargs
):
docs = [
{"id": "1", "language": "en", "text": "A recent report by the Government Accountability Office (GAO) found that the dramatic increase in oil and natural gas development on federal lands over the past six years has stretched the staff of the BLM to a point that it has been unable to meet its environmental protection responsibilities."},
{"id": "2", "language": "en", "text": ""},
]
response = mock.Mock(
status_code=200,
headers={"Content-Type": "application/json", "operation-location": "https://fakeurl.com"}
)
path_to_mock_json_response = os.path.abspath(
os.path.join(
os.path.abspath(__file__),
"..",
"./mock_test_responses/action_error_no_target.json",
)
)
with open(path_to_mock_json_response) as fd:
mock_json_response = json.loads(fd.read())
response.text = lambda encoding=None: json.dumps(mock_json_response)
response.content_type = "application/json"
transport = mock.Mock(send=lambda request, **kwargs: response)
endpoint = kwargs.pop("textanalytics_test_endpoint")
key = kwargs.pop("textanalytics_test_api_key")
client = TextAnalyticsClient(endpoint, AzureKeyCredential(key), transport=transport, api_version="v3.1")
with pytest.raises(HttpResponseError) as e:
response = list(client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
RecognizeLinkedEntitiesAction(),
RecognizePiiEntitiesAction()
],
show_stats=True,
polling_interval=self._interval(),
).result())
assert e.value.message == "(InternalServerError) 1 out of 3 job tasks failed. Failed job tasks : v3.1/entities/general."
@TextAnalyticsPreparer()
def test_generic_action_error_no_target(
self,
**kwargs
):
docs = [
{"id": "1", "language": "en", "text": "A recent report by the Government Accountability Office (GAO) found that the dramatic increase in oil and natural gas development on federal lands over the past six years has stretched the staff of the BLM to a point that it has been unable to meet its environmental protection responsibilities."},
{"id": "2", "language": "en", "text": ""},
]
response = mock.Mock(
status_code=200,
headers={"Content-Type": "application/json", "operation-location": "https://fakeurl.com"}
)
path_to_mock_json_response = os.path.abspath(
os.path.join(
os.path.abspath(__file__),
"..",
"./mock_test_responses/action_error_no_target_language.json",
)
)
with open(path_to_mock_json_response) as fd:
mock_json_response = json.loads(fd.read())
response.text = lambda encoding=None: json.dumps(mock_json_response)
response.content_type = "application/json"
transport = mock.Mock(send=lambda request, **kwargs: response)
endpoint = kwargs.pop("textanalytics_test_endpoint")
key = kwargs.pop("textanalytics_test_api_key")
client = TextAnalyticsClient(endpoint, AzureKeyCredential(key), transport=transport)
# workaround to get mocked response to work with deserialized polymorphic response type
def get_deserialized_for_mock(response, deserialized, headers):
from azure.ai.textanalytics._generated.models import AnalyzeTextJobState, AnalyzeTextJobsInput
from azure.ai.textanalytics._response_handlers import analyze_paged_result
deserialized = AnalyzeTextJobState.deserialize(response.raw_response)
return analyze_paged_result(
["1", "2"],
[(_AnalyzeActionsType.RECOGNIZE_ENTITIES, '0'),
(_AnalyzeActionsType.EXTRACT_KEY_PHRASES, '1'),
(_AnalyzeActionsType.RECOGNIZE_PII_ENTITIES, '2'),
(_AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES, '3'),
(_AnalyzeActionsType.ANALYZE_SENTIMENT, '4'),
],
client._client.analyze_text_job_status,
response,
deserialized,
headers,
show_stats=True,
)
with pytest.raises(HttpResponseError) as e:
response = list(client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction(),
],
show_stats=True,
polling_interval=self._interval(),
raw_response_hook=lambda resp: resp,
cls=get_deserialized_for_mock
).result())
assert e.value.message == "(InternalServerError) 1 out of 5 job tasks failed. Failed job tasks : keyphrasescomposite."
@TextAnalyticsPreparer()
def test_action_errors_with_targets_v3_1(
self,
**kwargs
):
docs = [
{"id": "1", "language": "en", "text": "A recent report by the Government Accountability Office (GAO) found that the dramatic increase in oil and natural gas development on federal lands over the past six years has stretched the staff of the BLM to a point that it has been unable to meet its environmental protection responsibilities."},
{"id": "2", "language": "en", "text": ""},
]
response = mock.Mock(
status_code=200,
headers={"Content-Type": "application/json", "operation-location": "https://fakeurl.com"}
)
# a mix of action errors to translate to doc errors, regular doc errors, and a successful response
path_to_mock_json_response = os.path.abspath(
os.path.join(
os.path.abspath(__file__),
"..",
"./mock_test_responses/action_error_with_targets.json",
)
)
with open(path_to_mock_json_response) as fd:
mock_json_response = json.loads(fd.read())
response.text = lambda encoding=None: json.dumps(mock_json_response)
response.content_type = "application/json"
transport = mock.Mock(send=lambda request, **kwargs: response)
endpoint = kwargs.pop("textanalytics_test_endpoint")
key = kwargs.pop("textanalytics_test_api_key")
client = TextAnalyticsClient(endpoint, AzureKeyCredential(key), transport=transport, api_version="v3.1")
response = list(client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction(),
RecognizePiiEntitiesAction(domain_filter="phi"),
AnalyzeSentimentAction(),
],
show_stats=True,
polling_interval=self._interval(),
).result())
assert len(response) == len(docs)
for idx, result in enumerate(response[0]):
assert result.id == "1"
if idx == 6:
assert not result.is_error
assert isinstance(result, AnalyzeSentimentResult)
else:
assert result.is_error
assert result.error.code == "InvalidRequest"
assert result.error.message == "Some error" + str(idx) # confirms correct doc error order
for idx, result in enumerate(response[1]):
assert result.id == "2"
assert result.is_error
if idx == 6:
assert result.error.code == "InvalidDocument"
assert result.error.message == "Document text is empty."
else:
assert result.error.code == "InvalidRequest"
assert result.error.message == "Some error" + str(idx) # confirms correct doc error order
@TextAnalyticsPreparer()
def test_action_errors_with_targets(
self,
**kwargs
):
docs = [
{"id": "1", "language": "en", "text": "A recent report by the Government Accountability Office (GAO) found that the dramatic increase in oil and natural gas development on federal lands over the past six years has stretched the staff of the BLM to a point that it has been unable to meet its environmental protection responsibilities."},
{"id": "2", "language": "en", "text": ""},
]
response = mock.Mock(
status_code=200,
headers={"Content-Type": "application/json", "operation-location": "https://fakeurl.com"}
)
# a mix of action errors to translate to doc errors, regular doc errors, and a successful response
path_to_mock_json_response = os.path.abspath(
os.path.join(
os.path.abspath(__file__),
"..",
"./mock_test_responses/action_error_with_targets_language.json",
)
)
with open(path_to_mock_json_response) as fd:
mock_json_response = json.loads(fd.read())
response.text = lambda encoding=None: json.dumps(mock_json_response)
response.content_type = "application/json"
transport = mock.Mock(send=lambda request, **kwargs: response)
endpoint = kwargs.pop("textanalytics_test_endpoint")
key = kwargs.pop("textanalytics_test_api_key")
client = TextAnalyticsClient(endpoint, AzureKeyCredential(key), transport=transport)
# workaround to get mocked response to work with deserialized polymorphic response type
def get_deserialized_for_mock(response, deserialized, headers):
from azure.ai.textanalytics._generated.models import AnalyzeTextJobState, AnalyzeTextJobsInput
from azure.ai.textanalytics._response_handlers import analyze_paged_result
deserialized = AnalyzeTextJobState.deserialize(response.raw_response)
return analyze_paged_result(
["1", "2"],
[(_AnalyzeActionsType.RECOGNIZE_ENTITIES, '0'),
(_AnalyzeActionsType.EXTRACT_KEY_PHRASES, '1'),
(_AnalyzeActionsType.RECOGNIZE_PII_ENTITIES, '2'),
(_AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES, '3'),
(_AnalyzeActionsType.ANALYZE_SENTIMENT, '4'),
(_AnalyzeActionsType.RECOGNIZE_PII_ENTITIES, '5'),
(_AnalyzeActionsType.ANALYZE_SENTIMENT, '6')
],
client._client.analyze_text_job_status,
response,
deserialized,
headers,
show_stats=True,
)
response = list(client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction(),
RecognizePiiEntitiesAction(domain_filter="phi"),
AnalyzeSentimentAction(),
],
show_stats=True,
polling_interval=self._interval(),
raw_response_hook=lambda resp: resp,
cls=get_deserialized_for_mock
).result())
assert len(response) == len(docs)
for idx, result in enumerate(response[0]):
assert result.id == "1"
if idx == 6:
assert not result.is_error
assert isinstance(result, AnalyzeSentimentResult)
else:
assert result.is_error
assert result.error.code == "InvalidRequest"
assert result.error.message == "Some error" + str(idx) # confirms correct doc error order
for idx, result in enumerate(response[1]):
assert result.id == "2"
assert result.is_error
if idx == 6:
assert result.error.code == "InvalidDocument"
assert result.error.message == "Document text is empty."
else:
assert result.error.code == "InvalidRequest"
assert result.error.message == "Some error" + str(idx) # confirms correct doc error order
@TextAnalyticsPreparer()
def test_action_job_failure_v3_1(
self,
**kwargs
):
docs = [
{"id": "1", "language": "en",
"text": "A recent report by the Government Accountability Office (GAO) found that the dramatic increase in oil and natural gas development on federal lands over the past six years has stretched the staff of the BLM to a point that it has been unable to meet its environmental protection responsibilities."},
{"id": "2", "language": "en", "text": ""},
]
response = mock.Mock(
status_code=200,
headers={"Content-Type": "application/json", "operation-location": "https://fakeurl.com"}
)
# action job failure with status=="failed", no partial results so we raise an exception in this case
path_to_mock_json_response = os.path.abspath(
os.path.join(
os.path.abspath(__file__),
"..",
"./mock_test_responses/action_job_failure.json",
)
)
with open(path_to_mock_json_response) as fd:
mock_json_response = json.loads(fd.read())
response.text = lambda encoding=None: json.dumps(mock_json_response)
response.content_type = "application/json"
transport = mock.Mock(send=lambda request, **kwargs: response)
endpoint = kwargs.pop("textanalytics_test_endpoint")
key = kwargs.pop("textanalytics_test_api_key")
client = TextAnalyticsClient(endpoint, AzureKeyCredential(key), transport=transport, api_version="v3.1")
with pytest.raises(HttpResponseError) as e:
response = list(client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
],
show_stats=True,
polling_interval=self._interval(),
).result())
assert len(response) == len(docs)
assert e.value.message == "(InternalServerError) 1 out of 1 job tasks failed. Failed job tasks : v3.1/entities/general."
@TextAnalyticsPreparer()
def test_action_job_failure(
self,
**kwargs
):
docs = [
{"id": "1", "language": "en",
"text": "A recent report by the Government Accountability Office (GAO) found that the dramatic increase in oil and natural gas development on federal lands over the past six years has stretched the staff of the BLM to a point that it has been unable to meet its environmental protection responsibilities."},
{"id": "2", "language": "en", "text": ""},
]
response = mock.Mock(
status_code=200,
headers={"Content-Type": "application/json", "operation-location": "https://fakeurl.com"}
)
# action job failure with status=="failed", no partial results so we raise an exception in this case
path_to_mock_json_response = os.path.abspath(
os.path.join(
os.path.abspath(__file__),
"..",
"./mock_test_responses/action_job_failure_language.json",
)
)
with open(path_to_mock_json_response) as fd:
mock_json_response = json.loads(fd.read())
response.text = lambda encoding=None: json.dumps(mock_json_response)
response.content_type = "application/json"
transport = mock.Mock(send=lambda request, **kwargs: response)
endpoint = kwargs.pop("textanalytics_test_endpoint")
key = kwargs.pop("textanalytics_test_api_key")
client = TextAnalyticsClient(endpoint, AzureKeyCredential(key),
transport=transport)
# workaround to get mocked response to work with deserialized polymorphic response type
def get_deserialized_for_mock(response, deserialized, headers):
from azure.ai.textanalytics._generated.models import AnalyzeTextJobState, AnalyzeTextJobsInput
from azure.ai.textanalytics._response_handlers import analyze_paged_result
deserialized = AnalyzeTextJobState.deserialize(response.raw_response)
return analyze_paged_result(
["1", "2"],
[(_AnalyzeActionsType.EXTRACT_KEY_PHRASES, '0')],
client._client.analyze_text_job_status,
response,
deserialized,
headers,
show_stats=True,
)
with pytest.raises(HttpResponseError) as e:
response = list(client.begin_analyze_actions(
docs,
actions=[
ExtractKeyPhrasesAction()
],
show_stats=True,
polling_interval=self._interval(),
raw_response_hook=lambda resp: resp,
cls=get_deserialized_for_mock
).result())
assert len(response) == len(docs)
assert e.value.message == "(InternalServerError) 1 out of 1 job tasks failed. Failed job tasks : keyphrasescomposite."
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": "v3.1"})
@recorded_by_proxy
def test_analyze_works_with_v3_1(self, client):
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction()
],
polling_interval=self._interval(),
).result()
results = list(response)
assert len(results) == len(docs)
document_order = ["56", "0", "19", "1"]
action_order = [
_AnalyzeActionsType.RECOGNIZE_ENTITIES,
_AnalyzeActionsType.EXTRACT_KEY_PHRASES,
_AnalyzeActionsType.RECOGNIZE_PII_ENTITIES,
_AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES,
_AnalyzeActionsType.ANALYZE_SENTIMENT,
]
for doc_idx, document_results in enumerate(results):
assert len(document_results) == 5
for action_idx, document_result in enumerate(document_results):
assert document_result.id == document_order[doc_idx]
assert not document_result.is_error
assert self.document_result_to_action_type(document_result) == action_order[action_idx]
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": "v3.0"})
def test_analyze_multiapi_validate_v3_0(self, **kwargs):
client = kwargs.pop("client")
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
with pytest.raises(ValueError) as e:
response = client.begin_analyze_actions(
docs,
actions=[
RecognizeEntitiesAction(),
ExtractKeyPhrasesAction(),
RecognizePiiEntitiesAction(),
RecognizeLinkedEntitiesAction(),
AnalyzeSentimentAction()
],
polling_interval=self._interval(),
).result()
assert str(e.value) == "'begin_analyze_actions' is only available for API version v3.1 and up."
@TextAnalyticsPreparer()
@TextAnalyticsCustomPreparer()
def test_analyze_multiapi_validate_v3_1(self, **kwargs):
textanalytics_custom_text_endpoint = kwargs.pop("textanalytics_custom_text_endpoint")
textanalytics_custom_text_key = kwargs.pop("textanalytics_custom_text_key")
textanalytics_single_category_classify_project_name = kwargs.pop("textanalytics_single_category_classify_project_name")
textanalytics_single_category_classify_deployment_name = kwargs.pop("textanalytics_single_category_classify_deployment_name")
textanalytics_multi_category_classify_project_name = kwargs.pop("textanalytics_multi_category_classify_project_name")
textanalytics_multi_category_classify_deployment_name = kwargs.pop("textanalytics_multi_category_classify_deployment_name")
textanalytics_custom_entities_project_name = kwargs.pop("textanalytics_custom_entities_project_name")
textanalytics_custom_entities_deployment_name = kwargs.pop("textanalytics_custom_entities_deployment_name")
client = TextAnalyticsClient(textanalytics_custom_text_endpoint, AzureKeyCredential(textanalytics_custom_text_key), api_version="v3.1")
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
version_supported = "2022-05-01"
with pytest.raises(ValueError) as e:
response = client.begin_analyze_actions(
docs,
actions=[
SingleCategoryClassifyAction(
project_name=textanalytics_single_category_classify_project_name,
deployment_name=textanalytics_single_category_classify_deployment_name
),
MultiCategoryClassifyAction(
project_name=textanalytics_multi_category_classify_project_name,
deployment_name=textanalytics_multi_category_classify_deployment_name
),
RecognizeCustomEntitiesAction(
project_name=textanalytics_custom_entities_project_name,
deployment_name=textanalytics_custom_entities_deployment_name
),
AnalyzeHealthcareEntitiesAction()
],
polling_interval=self._interval(),
).result()
assert str(e.value) == f"'RecognizeCustomEntitiesAction' is only available for API version " \
f"{version_supported} and up.\n'SingleCategoryClassifyAction' is only available " \
f"for API version {version_supported} and up.\n'MultiCategoryClassifyAction' is " \
f"only available for API version {version_supported} and up.\n'AnalyzeHealthcareEntitiesAction' is " \
f"only available for API version {version_supported} and up.\n"
@pytest.mark.skip("code changes needed before we can run test")
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
@recorded_by_proxy
def test_healthcare_action(self, client):
docs = [
"Patient does not suffer from high blood pressure.",
"Prescribed 100mg ibuprofen, taken twice daily.",
""
]
response = list(client.begin_analyze_actions(
docs,
actions=[
AnalyzeHealthcareEntitiesAction(
model_version="latest",
fhir_version="4.0.1"
)
],
show_stats=True,
polling_interval=self._interval(),
).result())
for idx, result in enumerate(response):
for res in result:
if idx == 2:
assert res.is_error
assert res.error.code == "InvalidDocument"
else:
assert res.entities
assert res.fhir_bundle
assert res.statistics
| 45.306919
| 353
| 0.621492
|
4a1937b181ff3d49b6e216751f739d1be05163d7
| 2,598
|
py
|
Python
|
python/FFNN_MODEL_001/config--all_data.py
|
nathanieljevans/DEEP_DRUG_SH
|
52c3c8ef311aac47e0367f6d7203945af2f4575a
|
[
"MIT"
] | null | null | null |
python/FFNN_MODEL_001/config--all_data.py
|
nathanieljevans/DEEP_DRUG_SH
|
52c3c8ef311aac47e0367f6d7203945af2f4575a
|
[
"MIT"
] | null | null | null |
python/FFNN_MODEL_001/config--all_data.py
|
nathanieljevans/DEEP_DRUG_SH
|
52c3c8ef311aac47e0367f6d7203945af2f4575a
|
[
"MIT"
] | null | null | null |
'''
'''
params = {
'NAME' : 'ALL-DATA-3FC_010',
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
'DATA_DIR' : '../../data_pytorch/tensors/',
'LABEL_PATH' : '../../data_pytorch/label_dict.pkl',
'SPLIT_LABEL_PATH' : '../../data_pytorch/split_label_dict.pkl',
'MODEL_OUT_DIR' : '../../models/',
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
'RESPLIT_DATA' : False,
'TRAIN_PROP' : 0.7,
'TEST_PROP' : 0.15,
'VAL_PROP' : 0.15,
'N_BEATAML_PATIENTS_EXCLUSIVE_TO_TEST' : 30, # hold these BeatAML patients out exclusively for the test split
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
'SAVE_MODEL_EVERY' : 5, # epochs
'PRINT_MID_EPOCH_INFO' : False,
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
## `i` indicates the order of classification layers
'RESP_TYPES' : {x:i for i,x in enumerate(['RNAi_dependency',
'crispr_dependency',
'pooled_drugresp_prism',
'AUC_GDSC','CTRP_AUC',
'AUC_drug_CCLE',
'beatAML_AUC'])},
'H1' : 5000,
'H2' : 5000,
'H3' : 50, # Layer 3 - Dataset Shared
'DH' : 200, # Dataset Specific Layer
'DO' : 0.5, # Dropout
'NCONVS' : 10,
'PRINT_EVERY' : 1,
'NGENES' : 523, # number of genes in the expression input (523)
'EPOCHS' : 100,
'LEARNING_WEIGHT' : 1e-1,
'WEIGHT_DECAY' : 0.01,
'LR_DECAY_PATIENCE' : 20, # batches (not epochs)
'PRETRAIN_EPOCHS' : 1,
'PRETRAIN_LR' : 1e-1,
'PRETRAIN_WD' : 0.1,
'PRETRAIN_DO' : 0.9,
'PRETRAIN_MSE_WEIGHT' : 50, # Weight applied to target MSE
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
'REPRODUCIBLE' : False,
'SEED' : 1000,
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
'train_params' : {'batch_size': 2**14, # 16384
'shuffle': True,
'num_workers': 12},
'test_params' : {'batch_size': 2**14,
'shuffle': False,
'num_workers': 12}
}
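if __name__ == '__main__':
    # Editor's sketch, not part of the original config: a quick sanity check of the
    # settings above plus the usual way the loader settings are consumed. `train_data`
    # is a hypothetical torch Dataset built elsewhere in this project, so the
    # DataLoader line is left commented.
    print(params['NAME'], '- epochs:', params['EPOCHS'],
          '- train batch size:', params['train_params']['batch_size'])
    # from torch.utils.data import DataLoader
    # train_loader = DataLoader(train_data, **params['train_params'])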
| 38.205882
| 82
| 0.339107
|
4a19390e5fe804d9793faf1f8a9f13d0ab155b85
| 2,767
|
py
|
Python
|
tnia/io/bioformats_helper.py
|
True-North-Intelligent-Algorithms/tnia-python
|
ab580716082e0ec655a27eb856dae629ebb1a3bf
|
[
"BSD-3-Clause"
] | 3
|
2022-02-08T16:30:05.000Z
|
2022-02-18T22:28:50.000Z
|
tnia/io/bioformats_helper.py
|
True-North-Intelligent-Algorithms/tnia-python
|
ab580716082e0ec655a27eb856dae629ebb1a3bf
|
[
"BSD-3-Clause"
] | null | null | null |
tnia/io/bioformats_helper.py
|
True-North-Intelligent-Algorithms/tnia-python
|
ab580716082e0ec655a27eb856dae629ebb1a3bf
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Created on Wed Mar 17 09:22:33 2021
@author: bnorthan
"""
import javabridge
import bioformats as bf
import numpy as np
def start_jvm():
javabridge.start_vm(class_path=bf.JARS)
def kill_jvm():
javabridge.kill_vm()
def load_volume(filename, c=0, series=0):
""" load a volume (all x,y,z) from a bioformats 5D (x,y,z,c,s) series. s is series number
Args:
filename (string): name of bioformats supported file
c (int, optional): channel number. Defaults to 0.
series (int, optional): series number. Defaults to 0.
Returns:
[3D numpy Array]: xyz volume at channel c and series number s
"""
meta=bf.get_omexml_metadata(filename)
o=bf.OMEXML(meta)
size_x=o.image(series).Pixels.get_PhysicalSizeX()
size_y=o.image(series).Pixels.get_PhysicalSizeY()
size_z=o.image(series).Pixels.get_PhysicalSizeZ()
nz=o.image(series).Pixels.SizeZ
img=bf.load_image(filename, z=0,c=c,series=series,rescale=False)
img=img[np.newaxis,...]
for cz in range(1,nz):
temp=bf.load_image(filename, z=cz, c=c, series=series, rescale=False)
img=np.vstack((img, temp[np.newaxis,...]))
return img, size_x, size_y, size_z
def load_channel(filename, nz, c):
    """ load all nz z-slices of channel c from a bioformats supported file """
    img=bf.load_image(filename, z=0,c=c,rescale=False)
img=img[np.newaxis,...]
for cz in range(1,nz):
print('read slice', cz)
temp=bf.load_image(filename, z=cz, c=c, rescale=False)
img=np.vstack((img, temp[np.newaxis,...]))
return img
def load_plane(filename,z,c):
return bf.load_image(filename, z=z,c=c,rescale=False)
def save_czyx(file_name, img, channel_names=None):
""" save a 4d image (c,z,y,x) order
Args:
filename ([type]): [description]
img ([type]): [description]
"""
if channel_names is None:
channel_names = ['a','b','c','d']
nc = img.shape[0]
nz = img.shape[1]
ny = img.shape[2]
nx = img.shape[3]
for z in range(nz):
for c in range(nc):
bf.write_image(file_name,img[c,z,:,:],'uint16',c=c,z=z,t=0,size_t=1,size_c=nc, size_z=nz, channel_names=channel_names)
def save_zcyx(file_name, img, channel_names=None):
    """ save a 4d image (z,c,y,x) order
Args:
filename ([type]): [description]
img ([type]): [description]
"""
if channel_names is None:
channel_names = ['a','b','c','d']
nz = img.shape[0]
nc = img.shape[1]
ny = img.shape[2]
nx = img.shape[3]
for z in range(nz):
for c in range(nc):
bf.write_image(file_name,img[z,c,:,:],'uint16',c=c,z=z,t=0,size_t=1,size_c=nc, size_z=nz, channel_names=channel_names)
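if __name__ == '__main__':
    # Editor's usage sketch, not part of the original module. Assumptions: a local
    # Bio-Formats-readable file named 'example.czi' whose series 0 / channel 0 exist;
    # adjust the path and indices for real data.
    start_jvm()
    try:
        vol, size_x, size_y, size_z = load_volume('example.czi', c=0, series=0)
        print('volume shape (z, y, x):', vol.shape)
        print('voxel size (x, y, z):', size_x, size_y, size_z)
    finally:
        kill_jvm()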
| 27.127451
| 130
| 0.603903
|
4a193a07b0c86c9feb8cd375f9d12a8d3e930388
| 943
|
py
|
Python
|
src/grokcore/viewlet/tests/base/viewlet/viewlet_render_and_template.py
|
zopefoundation/grokcore.viewlet
|
058cb9ca89aef705c9194e7a79b9e18a43012dbc
|
[
"ZPL-2.1"
] | null | null | null |
src/grokcore/viewlet/tests/base/viewlet/viewlet_render_and_template.py
|
zopefoundation/grokcore.viewlet
|
058cb9ca89aef705c9194e7a79b9e18a43012dbc
|
[
"ZPL-2.1"
] | 2
|
2018-01-04T15:10:32.000Z
|
2021-01-18T12:53:29.000Z
|
src/grokcore/viewlet/tests/base/viewlet/viewlet_render_and_template.py
|
zopefoundation/grokcore.viewlet
|
058cb9ca89aef705c9194e7a79b9e18a43012dbc
|
[
"ZPL-2.1"
] | 1
|
2018-01-12T06:53:28.000Z
|
2018-01-12T06:53:28.000Z
|
"""
A viewlet is not allowed to define its own render method and have a template
associated with it at the same time.
# PY2 - remove '+IGNORE_EXCEPTION_DETAIL' when dropping Python 2 support:
>>> grok.testing.grok(__name__) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
zope.configuration.config.ConfigurationExecutionError: \
martian.error.GrokError: Multiple possible ways to render viewlet \
<class \
'grokcore.viewlet.tests.base.viewlet.viewlet_render_and_template.Viewlet'>. \
It has both a 'render' method as well as an associated template.
"""
import grokcore.viewlet as grok
from zope.interface import Interface
class ViewletManager(grok.ViewletManager):
grok.name('foo')
grok.context(Interface)
class Viewlet(grok.Viewlet):
grok.viewletmanager(ViewletManager)
grok.context(Interface)
def render(self):
return u"Render method but also a template!"
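# Editor's sketch of the two configurations the grokker does accept (hypothetical
# classes, left as comments so they are not grokked as part of this regression test):
# a viewlet defines *either* a render method *or* has an associated template, never both.
#
#     class RenderOnlyViewlet(grok.Viewlet):
#         grok.viewletmanager(ViewletManager)
#         grok.context(Interface)
#
#         def render(self):
#             return u"Render method, no template."
#
#     class TemplateOnlyViewlet(grok.Viewlet):
#         grok.viewletmanager(ViewletManager)
#         grok.context(Interface)
#         # rendering comes from an associated template, e.g. templateonlyviewlet.pt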
| 29.46875
| 79
| 0.747614
|
4a193a7b4a515dd75002a56f0c2c53fe87c5ff2d
| 4,411
|
py
|
Python
|
Opportunity/test_vanilla.py
|
ardywibowo/vfds
|
f31e187c0108d2d675d878300f13dbabc2fa473a
|
[
"MIT"
] | null | null | null |
Opportunity/test_vanilla.py
|
ardywibowo/vfds
|
f31e187c0108d2d675d878300f13dbabc2fa473a
|
[
"MIT"
] | null | null | null |
Opportunity/test_vanilla.py
|
ardywibowo/vfds
|
f31e187c0108d2d675d878300f13dbabc2fa473a
|
[
"MIT"
] | null | null | null |
import argparse
import json
import os
import time
import numpy as np
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
from VanillaGRU import VanillaGRU
from OpportunityDataset import OpportunityDataset
from plotting import plot_single_trajectory
import multiprocessing
multiprocessing.set_start_method("spawn", True)
# Arguments
parser = argparse.ArgumentParser(description='Adaptive Concrete')
# Settings
parser.add_argument('--gpu', type=int, default=3, help='GPU device index')
parser.add_argument('--weights', type=str, default="Test", help='weight name')
parser.add_argument('--n_hidden', type=int, default=256, help='hidden units')
parser.add_argument('--n_layers', type=int, default=2, help='GRU layers')
parser.add_argument('--seg_len', type=int, default=100, help='Segment length')
parser.add_argument('--interpolate', action='store_true', help='linearly interpolate features')
parser.add_argument('--remove_feat', action='store_true', help='remove features according to paper')
parser.add_argument('--plot', action='store_true', help='plot per-trajectory predictions')
args = parser.parse_args()
def test(model, test_loader, label_type, epochs):
inputs, labels = next(iter(test_loader))
[batch_size, seq_length, num_features] = inputs.size()
cur_time = time.time()
pre_time = time.time()
use_gpu = torch.cuda.is_available()
tested_batch = 0
errors = 0
num_samples = 0
index = 0
while True:
for inputs, labels in test_loader:
inputs = inputs.float()
labels = labels.long()
labels = labels.permute(1, 0) # seq_length x batch
labels = labels.flatten() # seq_length * batch
if inputs.shape[0] != batch_size:
continue
if use_gpu:
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
outputs = model(inputs)
outputs = torch.cat(outputs) # seq_length * batch x num_classes
prediction = torch.argmax(outputs, dim=1)
if args.plot:
    save_dir = os.path.join(args.weights, 'plots' + str(label_type), str(index) + '.png')
    # VanillaGRU produces no feature-selection weights; pass None as a placeholder
    # (assumption: plot_single_trajectory tolerates a missing selection-weight trajectory)
    selection_weights = None
    plot_single_trajectory(prediction, labels, selection_weights, save_dir=save_dir)
index += 1
labelled_predictions = prediction[labels != -1]
labelled_labels = labels[labels != -1]
errors += torch.sum(labelled_predictions != labelled_labels).cpu().detach().numpy()
num_samples += labelled_labels.shape[0]
tested_batch += 1
if tested_batch % 10 == 0:
cur_time = time.time()
print('Tested #: {}, errors: {}, time: {}'.format( \
tested_batch * batch_size, \
np.around([errors / num_samples], decimals=8), \
np.around([cur_time - pre_time], decimals=8) ) )
pre_time = cur_time
if tested_batch > epochs:
break
print('Label Type: {}, Tested: errors: {}'.format(label_type, errors / num_samples))
def create_dir(path):
if not os.path.exists(path):
os.mkdir(path)
if __name__ == "__main__":
torch.cuda.set_device(args.gpu)
test_data = OpportunityDataset('../OpportunityUCIDataset/', split="test", segment_length=args.seg_len,
interpolate=args.interpolate, remove_features=args.remove_feat)
num_classes = test_data.num_classes
label_type = 6
n_c = num_classes[label_type]
print("Testing Label Type: {}".format(label_type))
plot_dir = os.path.join(args.weights, 'plots' + str(label_type))
create_dir(plot_dir)
test_data.select_label_type(label_type)
test_loader = DataLoader(test_data, num_workers=1, shuffle=False, batch_size=1)
inputs, labels = next(iter(test_loader))
[batch_size, seq_length, input_size] = inputs.size()
model = VanillaGRU(input_size, args.n_hidden, n_c, args.n_layers)
model.load_state_dict(torch.load(args.weights + "/best_model" + str(label_type) + ".pt",
map_location="cuda:{}".format(args.gpu)))
model = model.to(args.gpu)
test(model, test_loader, label_type, 6000)
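# Editor's note: example invocation (weight directory and device index are assumptions):
#
#     python test_vanilla.py --gpu 0 --weights Test --n_hidden 256 --n_layers 2 --seg_len 100 --plot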
| 35.861789
| 106
| 0.639084
|
4a193a93a45e5b78844161331ad46a577e670047
| 5,791
|
py
|
Python
|
lib/galaxy/webapps/galaxy/services/sharable.py
|
beatrizserrano/galaxy
|
e149d9d32e1bca6c07c38b1a9cdabfee60323610
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/webapps/galaxy/services/sharable.py
|
beatrizserrano/galaxy
|
e149d9d32e1bca6c07c38b1a9cdabfee60323610
|
[
"CC-BY-3.0"
] | 6
|
2021-11-11T20:57:49.000Z
|
2021-12-10T15:30:33.000Z
|
lib/galaxy/webapps/galaxy/services/sharable.py
|
beatrizserrano/galaxy
|
e149d9d32e1bca6c07c38b1a9cdabfee60323610
|
[
"CC-BY-3.0"
] | null | null | null |
import logging
from typing import (
List,
Optional,
Set,
Tuple,
)
from sqlalchemy import false
from galaxy import exceptions
from galaxy.managers import base
from galaxy.managers.sharable import (
SharableModelManager,
SharableModelSerializer,
)
from galaxy.model import User
from galaxy.schema.fields import EncodedDatabaseIdField
from galaxy.schema.schema import (
SetSlugPayload,
ShareWithPayload,
ShareWithStatus,
SharingOptions,
SharingStatus,
)
log = logging.getLogger(__name__)
class ShareableService:
"""
Provides the common logic used by the API to share *any* kind of
resource with other users.
The Manager class of the particular resource must implement the SharableModelManager
and have a compatible SharableModelSerializer implementation.
"""
def __init__(self, manager: SharableModelManager, serializer: SharableModelSerializer) -> None:
self.manager = manager
self.serializer = serializer
def set_slug(self, trans, id: EncodedDatabaseIdField, payload: SetSlugPayload):
item = self._get_item_by_id(trans, id)
self.manager.set_slug(item, payload.new_slug, trans.user)
def sharing(self, trans, id: EncodedDatabaseIdField) -> SharingStatus:
"""Gets the current sharing status of the item with the given id."""
item = self._get_item_by_id(trans, id)
return self._get_sharing_status(trans, item)
def enable_link_access(self, trans, id: EncodedDatabaseIdField) -> SharingStatus:
"""Makes this item accessible by link.
If this item contains other elements they will be publicly accessible too.
"""
item = self._get_item_by_id(trans, id)
self.manager.make_members_public(trans, item)
self.manager.make_importable(item)
return self._get_sharing_status(trans, item)
def disable_link_access(self, trans, id: EncodedDatabaseIdField) -> SharingStatus:
item = self._get_item_by_id(trans, id)
self.manager.make_non_importable(item)
return self._get_sharing_status(trans, item)
def publish(self, trans, id: EncodedDatabaseIdField) -> SharingStatus:
"""Makes this item publicly accessible.
If this item contains other elements they will be publicly accessible too.
"""
item = self._get_item_by_id(trans, id)
self.manager.make_members_public(trans, item)
self.manager.publish(item)
return self._get_sharing_status(trans, item)
def unpublish(self, trans, id: EncodedDatabaseIdField) -> SharingStatus:
item = self._get_item_by_id(trans, id)
self.manager.unpublish(item)
return self._get_sharing_status(trans, item)
def share_with_users(self, trans, id: EncodedDatabaseIdField, payload: ShareWithPayload) -> ShareWithStatus:
item = self._get_item_by_id(trans, id)
users, errors = self._get_users(trans, payload.user_ids)
extra = self._share_with_options(trans, item, users, errors, payload.share_option)
base_status = self._get_sharing_status(trans, item)
status = ShareWithStatus.parse_obj(base_status)
status.extra = extra
status.errors.extend(errors)
return status
def _share_with_options(
self,
trans,
item,
users: Set[User],
errors: Set[str],
share_option: Optional[SharingOptions] = None,
):
extra = self.manager.get_sharing_extra_information(trans, item, users, errors, share_option)
if not extra or extra.can_share:
self.manager.update_current_sharing_with_users(item, users)
extra = None
return extra
def _get_item_by_id(self, trans, id: EncodedDatabaseIdField):
class_name = self.manager.model_class.__name__
item = base.get_object(trans, id, class_name, check_ownership=True, check_accessible=True, deleted=False)
return item
def _get_sharing_status(self, trans, item):
status = self.serializer.serialize_to_view(item, user=trans.user, trans=trans, default_view="sharing")
status["users_shared_with"] = [
{"id": self.manager.app.security.encode_id(a.user.id), "email": a.user.email}
for a in item.users_shared_with
]
return SharingStatus.parse_obj(status)
def _get_users(self, trans, emails_or_ids: Optional[List] = None) -> Tuple[Set[User], Set[str]]:
if emails_or_ids is None:
raise exceptions.MessageException("Missing required user IDs or emails")
send_to_users: Set[User] = set()
send_to_err: Set[str] = set()
for email_or_id in set(emails_or_ids):
email_or_id = email_or_id.strip()
if not email_or_id:
continue
send_to_user = None
if "@" in email_or_id:
email_address = email_or_id
send_to_user = self.manager.user_manager.by_email(
email_address, filters=[User.table.c.deleted == false()]
)
else:
try:
decoded_user_id = trans.security.decode_id(email_or_id)
send_to_user = self.manager.user_manager.by_id(decoded_user_id)
if send_to_user.deleted:
send_to_user = None
except exceptions.MalformedId:
send_to_user = None
if not send_to_user:
send_to_err.add(f"{email_or_id} is not a valid Galaxy user.")
elif send_to_user == trans.user:
send_to_err.add("You cannot share resources with yourself.")
else:
send_to_users.add(send_to_user)
return send_to_users, send_to_err
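# Editor's sketch, not part of the original module: the wiring ShareableService
# expects. The concrete manager/serializer pair is an assumption - any matching
# SharableModelManager / SharableModelSerializer implementation from galaxy.managers
# works; only the constructor shape comes from the class above.
def build_sharing_service(manager: SharableModelManager, serializer: SharableModelSerializer) -> ShareableService:
    """Bundle one concrete manager/serializer pair behind the shared sharing logic."""
    return ShareableService(manager, serializer)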
| 38.606667
| 113
| 0.664652
|
4a193aabafd07573d83ffb497afa5a8a6e9302fc
| 99,019
|
py
|
Python
|
kubernetes/client/api/apiregistration_v1_api.py
|
carloscastrojumo/python
|
f461dd42d48650a4ae1b41d630875cad9fcb68ad
|
[
"Apache-2.0"
] | 2
|
2021-03-09T12:42:05.000Z
|
2021-03-09T13:27:50.000Z
|
kubernetes/client/api/apiregistration_v1_api.py
|
carloscastrojumo/python
|
f461dd42d48650a4ae1b41d630875cad9fcb68ad
|
[
"Apache-2.0"
] | 7
|
2021-04-13T03:04:42.000Z
|
2022-03-02T03:10:18.000Z
|
kubernetes/client/api/apiregistration_v1_api.py
|
carloscastrojumo/python
|
f461dd42d48650a4ae1b41d630875cad9fcb68ad
|
[
"Apache-2.0"
] | 1
|
2019-07-05T07:54:10.000Z
|
2019-07-05T07:54:10.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.17
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from kubernetes.client.api_client import ApiClient
from kubernetes.client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class ApiregistrationV1Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_api_service(self, body, **kwargs): # noqa: E501
"""create_api_service # noqa: E501
create an APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_api_service(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1APIService body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_api_service_with_http_info(body, **kwargs) # noqa: E501
def create_api_service_with_http_info(self, body, **kwargs): # noqa: E501
"""create_api_service # noqa: E501
create an APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_api_service_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1APIService body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIService, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_api_service" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `create_api_service`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiregistration.k8s.io/v1/apiservices', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIService', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
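# Editor's usage sketch, not part of the generated client (kept as a comment).
# Assumptions: kubeconfig access to a cluster and an existing Service
# "my-extension" in namespace "default"; model classes come from kubernetes.client.
#
#     from kubernetes import client, config
#     config.load_kube_config()
#     api = client.ApiregistrationV1Api()
#     body = client.V1APIService(
#         metadata=client.V1ObjectMeta(name="v1alpha1.example.com"),
#         spec=client.V1APIServiceSpec(
#             group="example.com",
#             version="v1alpha1",
#             group_priority_minimum=1000,
#             version_priority=15,
#             service=client.ApiregistrationV1ServiceReference(
#                 name="my-extension", namespace="default"),
#             insecure_skip_tls_verify=True,
#         ),
#     )
#     created = api.create_api_service(body, field_manager="example-editor")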
def delete_api_service(self, name, **kwargs): # noqa: E501
"""delete_api_service # noqa: E501
delete an APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_api_service(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the APIService (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_api_service_with_http_info(name, **kwargs) # noqa: E501
def delete_api_service_with_http_info(self, name, **kwargs): # noqa: E501
"""delete_api_service # noqa: E501
delete an APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_api_service_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the APIService (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty',
'dry_run',
'grace_period_seconds',
'orphan_dependents',
'propagation_policy',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_api_service" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `delete_api_service`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiregistration.k8s.io/v1/apiservices/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
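
    # Hedged usage sketch (editorial addition, not generated code): deleting a
    # single APIService through the method above. It assumes the upstream
    # `kubernetes` package layout, that this class is exposed as
    # client.ApiregistrationV1Api, and that a kubeconfig is reachable; the
    # service name below is only an illustrative placeholder.
    #
    #   from kubernetes import client, config
    #   config.load_kube_config()
    #   api = client.ApiregistrationV1Api()
    #   status = api.delete_api_service(
    #       name="v1beta1.metrics.k8s.io",
    #       body=client.V1DeleteOptions(propagation_policy="Foreground"))
    #   print(status.status)
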
def delete_collection_api_service(self, **kwargs): # noqa: E501
"""delete_collection_api_service # noqa: E501
delete collection of APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_api_service(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. Zero means delete immediately.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_collection_api_service_with_http_info(**kwargs) # noqa: E501
def delete_collection_api_service_with_http_info(self, **kwargs): # noqa: E501
"""delete_collection_api_service # noqa: E501
delete collection of APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_api_service_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. Zero means delete immediately.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param V1DeleteOptions body:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Status, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'_continue',
'dry_run',
'field_selector',
'grace_period_seconds',
'label_selector',
'limit',
'orphan_dependents',
'propagation_policy',
'resource_version',
'timeout_seconds',
'body'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_api_service" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'grace_period_seconds' in local_var_params and local_var_params['grace_period_seconds'] is not None: # noqa: E501
query_params.append(('gracePeriodSeconds', local_var_params['grace_period_seconds'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'orphan_dependents' in local_var_params and local_var_params['orphan_dependents'] is not None: # noqa: E501
query_params.append(('orphanDependents', local_var_params['orphan_dependents'])) # noqa: E501
if 'propagation_policy' in local_var_params and local_var_params['propagation_policy'] is not None: # noqa: E501
query_params.append(('propagationPolicy', local_var_params['propagation_policy'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiregistration.k8s.io/v1/apiservices', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
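
    # Hedged sketch: deleting a labelled collection of APIServices with the
    # method above. The label selector value is a made-up example, and `api`
    # is assumed to be an already-configured instance of this class.
    #
    #   status = api.delete_collection_api_service(
    #       label_selector="owner=demo",
    #       grace_period_seconds=0)
    #   print(status.status)
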
def get_api_resources(self, **kwargs): # noqa: E501
"""get_api_resources # noqa: E501
get available resources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_resources_with_http_info(**kwargs) # noqa: E501
def get_api_resources_with_http_info(self, **kwargs): # noqa: E501
"""get_api_resources # noqa: E501
get available resources # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIResourceList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiregistration.k8s.io/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
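
    # Hedged sketch: enumerating the resources served under
    # apiregistration.k8s.io/v1 via get_api_resources(); `api` is an assumed,
    # already-configured instance of this class.
    #
    #   resource_list = api.get_api_resources()
    #   for resource in resource_list.resources:
    #       print(resource.name, resource.kind, resource.namespaced)
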
def list_api_service(self, **kwargs): # noqa: E501
"""list_api_service # noqa: E501
list or watch objects of kind APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_api_service(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIServiceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_api_service_with_http_info(**kwargs) # noqa: E501
def list_api_service_with_http_info(self, **kwargs): # noqa: E501
"""list_api_service # noqa: E501
list or watch objects of kind APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_api_service_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str pretty: If 'true', then the output is pretty printed.
:param bool allow_watch_bookmarks: allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIServiceList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'pretty',
'allow_watch_bookmarks',
'_continue',
'field_selector',
'label_selector',
'limit',
'resource_version',
'timeout_seconds',
'watch'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_api_service" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'allow_watch_bookmarks' in local_var_params and local_var_params['allow_watch_bookmarks'] is not None: # noqa: E501
query_params.append(('allowWatchBookmarks', local_var_params['allow_watch_bookmarks'])) # noqa: E501
if '_continue' in local_var_params and local_var_params['_continue'] is not None: # noqa: E501
query_params.append(('continue', local_var_params['_continue'])) # noqa: E501
if 'field_selector' in local_var_params and local_var_params['field_selector'] is not None: # noqa: E501
query_params.append(('fieldSelector', local_var_params['field_selector'])) # noqa: E501
if 'label_selector' in local_var_params and local_var_params['label_selector'] is not None: # noqa: E501
query_params.append(('labelSelector', local_var_params['label_selector'])) # noqa: E501
if 'limit' in local_var_params and local_var_params['limit'] is not None: # noqa: E501
query_params.append(('limit', local_var_params['limit'])) # noqa: E501
if 'resource_version' in local_var_params and local_var_params['resource_version'] is not None: # noqa: E501
query_params.append(('resourceVersion', local_var_params['resource_version'])) # noqa: E501
if 'timeout_seconds' in local_var_params and local_var_params['timeout_seconds'] is not None: # noqa: E501
query_params.append(('timeoutSeconds', local_var_params['timeout_seconds'])) # noqa: E501
if 'watch' in local_var_params and local_var_params['watch'] is not None: # noqa: E501
query_params.append(('watch', local_var_params['watch'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiregistration.k8s.io/v1/apiservices', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIServiceList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
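
    # Hedged sketch: paging through APIServices with the limit/continue
    # protocol described in the docstring above. `api` is an assumed,
    # already-configured instance of this class and the page size of 50 is
    # arbitrary.
    #
    #   page = api.list_api_service(limit=50)
    #   while True:
    #       for item in page.items:
    #           print(item.metadata.name)
    #       token = page.metadata._continue
    #       if not token:
    #           break
    #       page = api.list_api_service(limit=50, _continue=token)
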
def patch_api_service(self, name, body, **kwargs): # noqa: E501
"""patch_api_service # noqa: E501
partially update the specified APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_api_service(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the APIService (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_api_service_with_http_info(name, body, **kwargs) # noqa: E501
def patch_api_service_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_api_service # noqa: E501
partially update the specified APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_api_service_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the APIService (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIService, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_api_service" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_api_service`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_api_service`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiregistration.k8s.io/v1/apiservices/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIService', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
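
    # Hedged sketch: adding a label to an APIService with a merge-style patch.
    # `api` is an assumed configured instance of this class; the name and
    # label values are placeholders, and the body is a plain dict describing
    # the desired change.
    #
    #   patch = {"metadata": {"labels": {"managed-by": "example"}}}
    #   updated = api.patch_api_service(name="v1beta1.metrics.k8s.io", body=patch)
    #   print(updated.metadata.labels)
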
def patch_api_service_status(self, name, body, **kwargs): # noqa: E501
"""patch_api_service_status # noqa: E501
partially update status of the specified APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_api_service_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the APIService (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The force flag must be unset for non-apply patch requests.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_api_service_status_with_http_info(name, body, **kwargs) # noqa: E501
def patch_api_service_status_with_http_info(self, name, body, **kwargs): # noqa: E501
"""patch_api_service_status # noqa: E501
partially update status of the specified APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_api_service_status_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the APIService (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
        :param bool force: Force is going to \"force\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The force flag must be unset for non-apply patch requests.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIService, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager',
'force'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_api_service_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `patch_api_service_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `patch_api_service_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
if 'force' in local_var_params and local_var_params['force'] is not None: # noqa: E501
query_params.append(('force', local_var_params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json', 'application/apply-patch+yaml']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiregistration.k8s.io/v1/apiservices/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIService', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
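
    # Hedged sketch: patching only the status subresource, mirroring the call
    # above but hitting .../{name}/status. The condition shown is illustrative
    # and `api` is an assumed configured instance of this class.
    #
    #   status_patch = {"status": {"conditions": [
    #       {"type": "Available", "status": "True", "reason": "Probe"}]}}
    #   api.patch_api_service_status(name="v1beta1.metrics.k8s.io",
    #                                body=status_patch)
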
def read_api_service(self, name, **kwargs): # noqa: E501
"""read_api_service # noqa: E501
read the specified APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_api_service(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the APIService (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
        :param bool export: Should this value be exported. Export strips fields that a user cannot specify. Deprecated. Planned for removal in 1.18.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_api_service_with_http_info(name, **kwargs) # noqa: E501
def read_api_service_with_http_info(self, name, **kwargs): # noqa: E501
"""read_api_service # noqa: E501
read the specified APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_api_service_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the APIService (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
        :param bool export: Should this value be exported. Export strips fields that a user cannot specify. Deprecated. Planned for removal in 1.18.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIService, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty',
'exact',
'export'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_api_service" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_api_service`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'exact' in local_var_params and local_var_params['exact'] is not None: # noqa: E501
query_params.append(('exact', local_var_params['exact'])) # noqa: E501
if 'export' in local_var_params and local_var_params['export'] is not None: # noqa: E501
query_params.append(('export', local_var_params['export'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiregistration.k8s.io/v1/apiservices/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIService', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
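
    # Hedged sketch: reading a single APIService and inspecting its spec.
    # `api` is an assumed configured instance of this class; the name is a
    # placeholder. Passing async_req=True instead would return a thread whose
    # .get() yields the same V1APIService object.
    #
    #   svc = api.read_api_service(name="v1beta1.metrics.k8s.io")
    #   print(svc.spec.group, svc.spec.version, svc.spec.service)
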
def read_api_service_status(self, name, **kwargs): # noqa: E501
"""read_api_service_status # noqa: E501
read status of the specified APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_api_service_status(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the APIService (required)
:param str pretty: If 'true', then the output is pretty printed.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.read_api_service_status_with_http_info(name, **kwargs) # noqa: E501
def read_api_service_status_with_http_info(self, name, **kwargs): # noqa: E501
"""read_api_service_status # noqa: E501
read status of the specified APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_api_service_status_with_http_info(name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the APIService (required)
:param str pretty: If 'true', then the output is pretty printed.
        :param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIService, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'pretty'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method read_api_service_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `read_api_service_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiregistration.k8s.io/v1/apiservices/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIService', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_api_service(self, name, body, **kwargs): # noqa: E501
"""replace_api_service # noqa: E501
replace the specified APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_api_service(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the APIService (required)
:param V1APIService body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_api_service_with_http_info(name, body, **kwargs) # noqa: E501
def replace_api_service_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_api_service # noqa: E501
replace the specified APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_api_service_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the APIService (required)
:param V1APIService body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIService, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_api_service" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_api_service`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_api_service`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiregistration.k8s.io/v1/apiservices/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIService', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_api_service_status(self, name, body, **kwargs): # noqa: E501
"""replace_api_service_status # noqa: E501
replace status of the specified APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_api_service_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the APIService (required)
:param V1APIService body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIService
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.replace_api_service_status_with_http_info(name, body, **kwargs) # noqa: E501
def replace_api_service_status_with_http_info(self, name, body, **kwargs): # noqa: E501
"""replace_api_service_status # noqa: E501
replace status of the specified APIService # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_api_service_status_with_http_info(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str name: name of the APIService (required)
:param V1APIService body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
        :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be 128 characters or less, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
        :param _return_http_data_only: response data without HTTP status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIService, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'name',
'body',
'pretty',
'dry_run',
'field_manager'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_api_service_status" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'name' is set
if self.api_client.client_side_validation and ('name' not in local_var_params or # noqa: E501
local_var_params['name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `name` when calling `replace_api_service_status`") # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and ('body' not in local_var_params or # noqa: E501
local_var_params['body'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `body` when calling `replace_api_service_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'name' in local_var_params:
path_params['name'] = local_var_params['name'] # noqa: E501
query_params = []
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'dry_run' in local_var_params and local_var_params['dry_run'] is not None: # noqa: E501
query_params.append(('dryRun', local_var_params['dry_run'])) # noqa: E501
if 'field_manager' in local_var_params and local_var_params['field_manager'] is not None: # noqa: E501
query_params.append(('fieldManager', local_var_params['field_manager'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/apiregistration.k8s.io/v1/apiservices/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIService', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
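# --- Illustrative usage sketch (added; not part of the generated client) ---
# Minimal example of how the methods above are typically called from user code,
# assuming a reachable cluster, a local kubeconfig, and an existing APIService named
# "v1beta1.metrics.k8s.io" (the APIService name is an assumption for illustration only).
if __name__ == "__main__":
    from kubernetes import client, config
    config.load_kube_config()  # read credentials from the default kubeconfig
    reg_api = client.ApiregistrationV1Api()
    # Synchronous read of a single APIService object.
    svc = reg_api.read_api_service("v1beta1.metrics.k8s.io")
    print(svc.metadata.name)
    # The same call made asynchronously, as described in the docstrings above.
    thread = reg_api.read_api_service("v1beta1.metrics.k8s.io", async_req=True)
    svc_async = thread.get()
    # Read-modify-write with replace_api_service: mutate the fetched object, then PUT it back.
    svc.metadata.labels = dict(svc.metadata.labels or {}, example="true")
    updated = reg_api.replace_api_service(svc.metadata.name, svc)
    print(updated.metadata.labels)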
| 63.924467
| 1,390
| 0.655359
|
4a193ab0f704f8539fc31af7c4d001a339f3c151
| 7,045
|
py
|
Python
|
jyotisha/rest_api/api_v1.py
|
hareeshbabu82ns/jyotisha
|
45ac19e999174cb64c239c1e4ccfb33bc5424137
|
[
"MIT"
] | null | null | null |
jyotisha/rest_api/api_v1.py
|
hareeshbabu82ns/jyotisha
|
45ac19e999174cb64c239c1e4ccfb33bc5424137
|
[
"MIT"
] | null | null | null |
jyotisha/rest_api/api_v1.py
|
hareeshbabu82ns/jyotisha
|
45ac19e999174cb64c239c1e4ccfb33bc5424137
|
[
"MIT"
] | null | null | null |
import logging
import swisseph as swe
import flask_restx
from flask import Blueprint
from flask_restx import Resource
from flask_restx import reqparse
import jyotisha.panchangam.spatio_temporal.annual
import jyotisha.panchangam.spatio_temporal.daily
from jyotisha.panchangam import scripts
from jyotisha.panchangam.spatio_temporal import City
from jyotisha.panchangam.temporal import festival
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)s: %(asctime)s {%(filename)s:%(lineno)d}: %(message)s "
)
URL_PREFIX = '/v1'
api_blueprint = Blueprint(
'panchanga', __name__,
template_folder='templates'
)
api = flask_restx.Api(app=api_blueprint, version='1.0', title='jyotisha panchanga API',
description='For detailed intro and to report issues: see <a href="https://github.com/sanskrit-coders/jyotisha">here</a>. '
                                  'A list of REST and non-REST API routes available on this server: <a href="../sitemap">sitemap</a>.',
default_label=api_blueprint.name,
prefix=URL_PREFIX, doc='/docs')
def get_body_id(body_name):
body_id = -1
if body_name == "sun":
body_id = swe.SUN
elif body_name == "moon":
body_id = swe.MOON
elif body_name == "jupiter":
body_id = swe.JUPITER
elif body_name == "venus":
body_id = swe.VENUS
elif body_name == "mercury":
body_id = swe.MERCURY
elif body_name == "mars":
body_id = swe.MARS
elif body_name == "saturn":
body_id = swe.SATURN
return body_id
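# Hypothetical alternative sketch (added; not in the original module): the same
# name-to-body mapping as a dict lookup instead of the if/elif chain above. The helper
# name and the module-level table are invented; the -1 fallback mirrors get_body_id.
_BODY_IDS = {
    "sun": swe.SUN,
    "moon": swe.MOON,
    "jupiter": swe.JUPITER,
    "venus": swe.VENUS,
    "mercury": swe.MERCURY,
    "mars": swe.MARS,
    "saturn": swe.SATURN,
}
def get_body_id_from_dict(body_name):
    return _BODY_IDS.get(body_name, -1)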
# noinspection PyUnresolvedReferences
@api.route('/calendars/coordinates/<string:latitude>/<string:longitude>/years/<string:year>')
# TODO: How to set default values for latitude and longitude here??
# Questions at: https://github.com/noirbizarre/flask-restplus/issues/381 and stackoverflow linked therein.
class DailyCalendarHandler(Resource):
get_parser = reqparse.RequestParser()
get_parser.add_argument('timezone', type=str, default='Asia/Calcutta', help='Example: Asia/Calcutta', location='args', required=True)
get_parser.add_argument('encoding', type=str, default='devanagari', help='Example: iast, devanagari, kannada, tamil', location='args',
required=True)
@api.expect(get_parser)
def get(self, latitude, longitude, year):
args = self.get_parser.parse_args()
city = City("", latitude, longitude, args['timezone'])
panchangam = jyotisha.panchangam.spatio_temporal.annual.get_panchangam(city=city, year=int(year), script=args['encoding'])
return panchangam.to_json_map()
# noinspection PyUnresolvedReferences
@api.route('/kaalas/coordinates/<string:latitude>/<string:longitude>/years/<string:year>/months/<int:month>/days/<int:day>/')
class KaalaHandler(Resource):
get_parser = reqparse.RequestParser()
get_parser.add_argument('timezone', type=str, default='Asia/Calcutta', help='Example: Asia/Calcutta', location='args', required=True)
get_parser.add_argument('encoding', type=str, default='devanagari', help='Example: iast, devanagari, kannada, tamil', location='args',
required=True)
get_parser.add_argument('format', type=str, default='hh:mm:ss*', help='Example: hh:mm:ss*, hh:mm', location='args', required=True)
@api.expect(get_parser)
def get(self, latitude, longitude, year, month, day):
args = self.get_parser.parse_args()
city = City("", latitude, longitude, args['timezone'])
panchangam = jyotisha.panchangam.spatio_temporal.daily.DailyPanchanga(city=city, year=int(year), month=int(month), day=int(day))
return panchangam.get_kaalas_local_time(format=args['format'])
# noinspection PyUnresolvedReferences
@api.route('/events/<string:id>')
class EventFinder(Resource):
def get(self, id):
festival.fill_festival_id_to_json()
festival_data = festival.festival_id_to_json.get(id)
return festival_data
# noinspection PyUnresolvedReferences
@api.route('/times/utc_offsets/<string:offset>/years/<int:year>/months/<int:month>/days/<int:day>/hours/<int:hour>/minutes/<int:minute>/seconds/<int:second>/bodies/<string:body>/nakshatra')
class NakshatraFinder(Resource):
def get(self, body, offset, year, month, day, hour, minute, second):
from jyotisha import zodiac
(utc_year, utc_month, utc_day, utc_hour, utc_minute, utc_second) = swe.utc_time_zone(year, month, day, hour, minute, second, float(offset))
julday = swe.utc_to_jd(year=utc_year, month=utc_month, day=utc_day, hour=utc_hour, minutes=utc_minute, seconds=utc_second, flag=swe.GREG_CAL)[0]
lahiri_nakshatra_division = zodiac.NakshatraDivision(julday=julday)
body_id = get_body_id(body_name=body)
if body == "moon":
from jyotisha.panchangam import temporal
logging.debug(temporal.get_nakshatram(julday))
nakshatra = lahiri_nakshatra_division.get_nakshatra(body_id=body_id)
logging.info(nakshatra)
return str(nakshatra)
# return "haha"
# noinspection PyUnresolvedReferences
@api.route('/times/utc_offsets/<string:offset>/years/<int:year>/months/<int:month>/days/<int:day>/hours/<int:hour>/minutes/<int:minute>/seconds/<int:second>/raashi')
class RaashiFinder(Resource):
def get(self, offset, year, month, day, hour, minute, second):
(utc_year, utc_month, utc_day, utc_hour, utc_minute, utc_second) = swe.utc_time_zone(year, month, day, hour, minute, second, float(offset))
julday = swe.utc_to_jd(year=utc_year, month=utc_month, day=utc_day, hour=utc_hour, minutes=utc_minute, seconds=utc_second, flag=swe.GREG_CAL)[0]
from jyotisha.panchangam import temporal
raashi = temporal.get_solar_rashi(jd=julday)
logging.info(raashi)
return str(raashi)
# return "haha"
# noinspection PyUnresolvedReferences
@api.route('/times/utc_offsets/<string:offset>/years/<int:year>/months/<int:month>/days/<int:day>/hours/<int:hour>/minutes/<int:minute>/seconds/<int:second>/bodies/<string:body>/raashi_transition_100_days')
class RaashiTransitionFinder(Resource):
def get(self, offset, year, month, day, hour, minute, second, body):
from jyotisha import zodiac
(utc_year, utc_month, utc_day, utc_hour, utc_minute, utc_second) = swe.utc_time_zone(year=year, month=month, day=day, hour=hour, minutes=minute, seconds=second, offset=float(offset))
julday = swe.utc_to_jd(year=utc_year, month=utc_month, day=utc_day, hour=utc_hour, minutes=utc_minute, seconds=utc_second, flag=swe.GREG_CAL)[0]
body_id = get_body_id(body_name=body)
from jyotisha.panchangam import temporal
transits = temporal.get_planet_next_transit(jd_start=julday, jd_end = julday + 100, planet=body_id)
# logging.debug(transits)
transits_utc = [(swe.jdut1_to_utc(ut=transit[0], flag=swe.GREG_CAL), transit[1], transit[2]) for transit in transits]
transits_local = [(swe.utc_time_zone(year=transit[0][0], month=transit[0][1], day=transit[0][2], hour=transit[0][3], minutes=transit[0][4], seconds=int(transit[0][5]), offset=-float(offset)), transit[1], transit[2]) for transit in transits_utc]
return str(transits_local)
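# --- Illustrative client-side sketch (added; not part of this module) ---
# Assuming the blueprint is registered on a Flask app served at http://localhost:5000
# (host and port are assumptions), the routes above can be exercised with plain HTTP calls.
if __name__ == "__main__":
    import requests
    base = "http://localhost:5000" + URL_PREFIX
    common = {"timezone": "Asia/Calcutta", "encoding": "devanagari"}
    # Annual calendar for approximate Bangalore coordinates.
    r = requests.get(base + "/calendars/coordinates/12.97/77.59/years/2021", params=common)
    print(r.status_code)
    # Kaalas for a single day; 'format' is required by KaalaHandler's request parser.
    r = requests.get(base + "/kaalas/coordinates/12.97/77.59/years/2021/months/1/days/1/",
                     params=dict(common, format="hh:mm:ss*"))
    print(r.json() if r.ok else r.text)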
| 49.265734
| 248
| 0.731157
|
4a193c00afb966eda364213120de20a033401122
| 4,399
|
py
|
Python
|
vulnwhisp/frameworks/qualys_vuln.py
|
tylerwebb30/VulnWhisperer
|
4ea384c9ccc3253f38c9ad7da7dd24d072652fa3
|
[
"Apache-2.0"
] | null | null | null |
vulnwhisp/frameworks/qualys_vuln.py
|
tylerwebb30/VulnWhisperer
|
4ea384c9ccc3253f38c9ad7da7dd24d072652fa3
|
[
"Apache-2.0"
] | null | null | null |
vulnwhisp/frameworks/qualys_vuln.py
|
tylerwebb30/VulnWhisperer
|
4ea384c9ccc3253f38c9ad7da7dd24d072652fa3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'Nathan Young'
import xml.etree.ElementTree as ET
import pandas as pd
import qualysapi
import requests
import sys
import logging
import os
import dateutil.parser as dp
class qualysWhisperAPI(object):
SCANS = 'api/2.0/fo/scan'
def __init__(self, config=None):
self.logger = logging.getLogger('qualysWhisperAPI')
self.config = config
try:
self.qgc = qualysapi.connect(config, 'qualys_vuln')
# Fail early if we can't make a request or auth is incorrect
self.qgc.request('about.php')
self.logger.info('Connected to Qualys at {}'.format(self.qgc.server))
except Exception as e:
self.logger.error('Could not connect to Qualys: {}'.format(str(e)))
            sys.exit(1)
def scan_xml_parser(self, xml):
all_records = []
root = ET.XML(xml)
for child in root.find('.//SCAN_LIST'):
all_records.append({
'name': child.find('TITLE').text,
'id': child.find('REF').text,
'date': child.find('LAUNCH_DATETIME').text,
'type': child.find('TYPE').text,
'duration': child.find('DURATION').text,
'status': child.find('.//STATE').text,
})
return pd.DataFrame(all_records)
def get_all_scans(self):
parameters = {
'action': 'list',
'echo_request': 0,
'show_op': 0,
'launched_after_datetime': '0001-01-01'
}
scans_xml = self.qgc.request(self.SCANS, parameters)
return self.scan_xml_parser(scans_xml)
def get_scan_details(self, scan_id=None):
parameters = {
'action': 'fetch',
'echo_request': 0,
'output_format': 'json_extended',
'mode': 'extended',
'scan_ref': scan_id
}
scan_json = self.qgc.request(self.SCANS, parameters)
# First two columns are metadata we already have
# Last column corresponds to "target_distribution_across_scanner_appliances" element
# which doesn't follow the schema and breaks the pandas data manipulation
return pd.read_json(scan_json).iloc[2:-1]
class qualysUtils:
def __init__(self):
self.logger = logging.getLogger('qualysUtils')
def iso_to_epoch(self, dt):
out = dp.parse(dt).strftime('%s')
self.logger.info('Converted {} to {}'.format(dt, out))
return out
class qualysVulnScan:
def __init__(
self,
config=None,
file_in=None,
file_stream=False,
delimiter=',',
quotechar='"',
):
self.logger = logging.getLogger('qualysVulnScan')
self.file_in = file_in
self.file_stream = file_stream
self.report = None
self.utils = qualysUtils()
if config:
try:
self.qw = qualysWhisperAPI(config=config)
except Exception as e:
self.logger.error('Could not load config! Please check settings. Error: {}'.format(str(e)))
if file_stream:
self.open_file = file_in.splitlines()
elif file_in:
self.open_file = open(file_in, 'rb')
self.downloaded_file = None
def process_data(self, scan_id=None):
"""Downloads a file from Qualys and normalizes it"""
self.logger.info('Downloading scan ID: {}'.format(scan_id))
scan_report = self.qw.get_scan_details(scan_id=scan_id)
if not scan_report.empty:
keep_columns = ['category', 'cve_id', 'cvss3_base', 'cvss3_temporal', 'cvss_base', 'cvss_temporal', 'dns', 'exploitability', 'fqdn', 'impact', 'ip', 'ip_status', 'netbios', 'os', 'pci_vuln', 'port', 'protocol', 'qid', 'results', 'severity', 'solution', 'ssl', 'threat', 'title', 'type', 'vendor_reference']
scan_report = scan_report.filter(keep_columns)
scan_report['severity'] = scan_report['severity'].astype(int).astype(str)
scan_report['qid'] = scan_report['qid'].astype(int).astype(str)
else:
            self.logger.warning('Scan ID {} has no vulnerabilities, skipping.'.format(scan_id))
        return scan_report
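# --- Illustrative usage sketch (added; not part of the original module) ---
# Assumes a qualysapi-style config file at ~/.qualys.ini with a valid 'qualys_vuln'
# section; the config path and the choice of scan are assumptions for illustration.
if __name__ == "__main__":
    config_path = os.path.expanduser("~/.qualys.ini")
    scanner = qualysVulnScan(config=config_path)
    scans = scanner.qw.get_all_scans()  # DataFrame of scan metadata (name, id, date, ...)
    if not scans.empty:
        report = scanner.process_data(scan_id=scans.iloc[0]["id"])
        print(report.head())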
| 35.764228
| 318
| 0.590589
|
4a193c60e2fa4d99e56edb7abe0ef10d95aab3b5
| 1,780
|
py
|
Python
|
dlcv/utils.py
|
j2zhao/deeplens-cv
|
39f78a72e08997e36754914e7a3875858d5e784c
|
[
"MIT"
] | null | null | null |
dlcv/utils.py
|
j2zhao/deeplens-cv
|
39f78a72e08997e36754914e7a3875858d5e784c
|
[
"MIT"
] | null | null | null |
dlcv/utils.py
|
j2zhao/deeplens-cv
|
39f78a72e08997e36754914e7a3875858d5e784c
|
[
"MIT"
] | 1
|
2022-01-08T21:09:07.000Z
|
2022-01-08T21:09:07.000Z
|
"""This file is part of DeepLens which is released under MIT License and
is copyrighted by the University of Chicago. This project is developed by
the database group (chidata).
utils.py defines some utilities that can be used for debugging and manipulating
image streams.
"""
import cv2
import numpy as np
#plays video stream through the system player
def play(vstream):
for frame in vstream:
cv2.imshow('Player',frame['data'])
if cv2.waitKey(3) & 0xFF == ord('q'):
break
#shows a single frame
def show(frame):
cv2.imshow('Debug',frame)
cv2.waitKey(0)
#overlays a bounding box with labels over a frame
def overlay(frame, bbs):
ff = np.copy(frame)
for label, bb in bbs:
cv2.rectangle(ff, (bb[0],bb[2]), (bb[1],bb[3]),(0,255,0), 2)
cv2.putText(ff, label, (bb[0],bb[2]), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), lineType=cv2.LINE_AA)
return ff
#crop and replace primitives
def bb_crop(frame, box):
ff = np.copy(frame)
return ff[box.y0:box.y1,box.x0:box.x1]
def bb_replace(frame1, box, frame2):
ff = np.copy(frame1)
ff[box.y0:box.y1,box.x0:box.x1] = frame2
return ff
#matches frames against each other
def image_match(im1, im2, hess_thresh=150, dist_threshold=1000, accept=0.75):
brisk = cv2.BRISK_create(thresh=hess_thresh)
(kps1, descs1) = brisk.detectAndCompute(im1, None)
(kps2, descs2) = brisk.detectAndCompute(im2, None)
match_cnt = 0
for i,k in enumerate(kps1):
best_match = None
for j,k in enumerate(kps2):
distance = np.linalg.norm(descs2[j]-descs1[i])
if distance < dist_threshold:
                if best_match is None:
                    best_match = (j, distance)
                else:
                    best_match = (j, min(best_match[1], distance))
        match_cnt += (best_match is not None)
if len(kps1) == 0:
return False
return (match_cnt/len(kps1) >= accept)
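# --- Illustrative usage sketch (added; not part of the original module) ---
# The image paths and the bounding box are placeholders; this only shows how
# overlay(), show() and image_match() are meant to be combined.
if __name__ == "__main__":
    frame_a = cv2.imread("frame_a.jpg")  # hypothetical test images
    frame_b = cv2.imread("frame_b.jpg")
    if frame_a is not None and frame_b is not None:
        print("frames match:", image_match(frame_a, frame_b))
        # overlay() expects boxes as (label, (x0, x1, y0, y1)).
        show(overlay(frame_a, [("car", (10, 100, 20, 120))]))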
| 25.070423
| 106
| 0.701685
|
4a193e62a69ccb2d2accc3f3a38ef19e2f65d421
| 2,220
|
py
|
Python
|
parseAndPopulate/loadJsonFiles.py
|
stanislav-chlebec/backend
|
95b8861c51a4a72de5b50587208b771b9e79d25d
|
[
"Apache-2.0"
] | null | null | null |
parseAndPopulate/loadJsonFiles.py
|
stanislav-chlebec/backend
|
95b8861c51a4a72de5b50587208b771b9e79d25d
|
[
"Apache-2.0"
] | null | null | null |
parseAndPopulate/loadJsonFiles.py
|
stanislav-chlebec/backend
|
95b8861c51a4a72de5b50587208b771b9e79d25d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The IETF Trust 2019, All Rights Reserved
# Copyright 2018 Cisco and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This class will load all the json files from the yangcatalog
private directory. These files are then used for module compilation
status and results.
"""
__author__ = "Miroslav Kovac"
__copyright__ = "Copyright 2018 Cisco and its affiliates, Copyright The IETF Trust 2019, All Rights Reserved"
__license__ = "Apache License, Version 2.0"
__email__ = "miroslav.kovac@pantheon.tech"
import json
from utility import log
class LoadFiles:
def __init__(self, private_dir, log_directory):
LOGGER = log.get_logger(__name__, log_directory + '/parseAndPopulate.log')
LOGGER.debug('Loading compilation statuses and results')
self.names = []
with open(private_dir + '/json_links', 'r') as f:
for line in f:
self.names.append(line.replace('.json', '').replace('\n', ''))
self.status = {}
self.headers = {}
for name in self.names:
with open('{}/{}.json'.format(private_dir, name), 'r') as f:
self.status[name] = json.load(f)
if name == 'IETFYANGRFC':
with open('{}/{}.html'.format(private_dir, name)) as f:
html = f.read()
else:
with open('{}/{}YANGPageCompilation.html'.format(private_dir, name)) as f:
html = f.read()
ths = html.split('<TH>')
results = []
for th in ths:
res = th.split('</TH>')[0]
if 'Compilation Result' in res:
results.append(res)
self.headers[name] = results
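# --- Illustrative usage sketch (added; not part of the original module) ---
# The directory paths are placeholders; private_dir must contain the 'json_links'
# file plus the matching .json/.html files described in the class above.
if __name__ == "__main__":
    files = LoadFiles(private_dir='/var/yang/private', log_directory='/var/yang/logs')
    for name in files.names:
        print(name, len(files.headers.get(name, [])), 'compilation result columns')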
| 37
| 109
| 0.63018
|
4a193e8b28c7e96ca42c833840d9867edb711920
| 3,489
|
py
|
Python
|
tests/data_collection/test_reddit_comment_parse.py
|
brandondong/twss
|
075cc5d716de42381bbe907c932c2d50d8d294d4
|
[
"MIT"
] | null | null | null |
tests/data_collection/test_reddit_comment_parse.py
|
brandondong/twss
|
075cc5d716de42381bbe907c932c2d50d8d294d4
|
[
"MIT"
] | null | null | null |
tests/data_collection/test_reddit_comment_parse.py
|
brandondong/twss
|
075cc5d716de42381bbe907c932c2d50d8d294d4
|
[
"MIT"
] | null | null | null |
import unittest
import reddit_comment_parse as rcp
class TestRedditCommentParse(unittest.TestCase):
def test_simple_matches(self):
self.assertEqual(rcp.match_twss("That's what she said"), "")
self.assertEqual(rcp.match_twss("( ͡° ͜ʖ ͡°)"), "")
def test_simple_nonmatches(self):
self.assertEqual(rcp.match_twss("Hello world!"), None)
self.assertEqual(rcp.match_twss("python 3"), None)
def test_simple_match_variations(self):
self.assertEqual(rcp.match_twss("That's what she said!"), "")
self.assertEqual(rcp.match_twss("THAT'S WHAT SHE SAID"), "")
self.assertEqual(rcp.match_twss("thats what she said."), "")
self.assertEqual(rcp.match_twss("thats_what_she_said"), "")
self.assertEqual(rcp.match_twss(" ( ͡° ͜ʖ ͡°)"), "")
self.assertEqual(rcp.match_twss("( ͡° ͜ʖ ͡°) "), "")
self.assertEqual(rcp.match_twss("( ͡° ͜ʖ ͡°)\n"), "")
def test_simple_false_positives(self):
self.assertEqual(rcp.match_twss("That's what she said about Python."), None)
self.assertEqual(rcp.match_twss("( ͡° ͜ʖ ͡°) Hello"), None)
def test_quote_matches(self):
self.assertEqual(rcp.match_twss(">That's huge!\n\nThat's what she said"), "That's huge!")
self.assertEqual(rcp.match_twss("> It's hard\n\n( ͡° ͜ʖ ͡°)"), "It's hard")
self.assertEqual(rcp.match_twss(">I'm exhausted \n\n( ͡° ͜ʖ ͡°)"), "I'm exhausted")
def test_rejects_multi_quotes(self):
self.assertEqual(rcp.match_twss(">That's huge!\n\n>Another quote.\n\nThat's what she said"), None)
def test_extracts_bold(self):
self.assertEqual(rcp.match_twss(">__It's hard__ to sleep\n\nThat's what she said"), "It's hard")
# A real comment: https://www.reddit.com/r/programming/comments/9sc0qj/deepcreampy_decensoring_hentai_with_deep_neural/e8npbse.
test_comment = ">Image Inpainting for **Irregular Holes** Using Partial Convolutions\n\n( ͡° ͜ʖ ͡°)"
self.assertEqual(rcp.match_twss(test_comment), "Irregular Holes")
def test_match_strips_formatting(self):
self.assertEqual(rcp.match_twss(">_It's_ hard\n\nThat's what she said"), "It's hard")
self.assertEqual(rcp.match_twss(">It's *hard*\n\nThat's what she said"), "It's hard")
def test_strip_italics(self):
self.assertEqual(rcp.strip_formatting("_Hello_"), "Hello")
self.assertEqual(rcp.strip_formatting("*Hello*"), "Hello")
self.assertEqual(rcp.strip_formatting("Hello *World!*"), "Hello World!")
self.assertEqual(rcp.strip_formatting("Hello _World!_"), "Hello World!")
def test_strip_bold(self):
self.assertEqual(rcp.strip_formatting("__Hello__"), "Hello")
self.assertEqual(rcp.strip_formatting("**Hello**"), "Hello")
self.assertEqual(rcp.strip_formatting("Hello **World!**"), "Hello World!")
self.assertEqual(rcp.strip_formatting("Hello __World!__"), "Hello World!")
def test_strip_superscript(self):
self.assertEqual(rcp.strip_formatting("Hello^World!"), "Hello World!")
def test_strip_strikethrough(self):
self.assertEqual(rcp.strip_formatting("This is ~~not~~ a joke"), "This is a joke")
self.assertEqual(rcp.strip_formatting("I should finish ~~quickly~~."), "I should finish.")
self.assertEqual(rcp.strip_formatting("~~what's~~ updog"), "updog")
self.assertEqual(rcp.strip_formatting("fire~~truck~~"), "fire")
self.assertEqual(rcp.strip_formatting("~~fire~~truck"), "truck")
def test_strip_escapes(self):
        self.assertEqual(rcp.strip_formatting(r"3 \* 4"), "3 * 4")
        self.assertEqual(rcp.strip_formatting(r"3\\4"), r"3\4")
        self.assertEqual(rcp.strip_formatting(r"3\^4"), "3^4")
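# Standard unittest entry point (added for convenience; not in the original file),
# so the suite can be run directly with `python test_reddit_comment_parse.py`.
if __name__ == "__main__":
    unittest.main()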
| 49.842857
| 129
| 0.712238
|
4a1941327f3ec8a59a212a742e3a4abdca8c47ad
| 2,234
|
py
|
Python
|
test/test_procstat.py
|
silverfernsys/supervisoragent
|
dda727d161866e190ce36de22a23381e65534ff8
|
[
"BSD-4-Clause"
] | null | null | null |
test/test_procstat.py
|
silverfernsys/supervisoragent
|
dda727d161866e190ce36de22a23381e65534ff8
|
[
"BSD-4-Clause"
] | null | null | null |
test/test_procstat.py
|
silverfernsys/supervisoragent
|
dda727d161866e190ce36de22a23381e65534ff8
|
[
"BSD-4-Clause"
] | null | null | null |
import mock
import os
import subprocess
import unittest
from supervisoragent.procstat import CPUStats, MemoryStats
class ProcStatTest(unittest.TestCase):
def setUp(self):
self.cpu = CPUStats(1)
self.mem = MemoryStats(1)
self.resources = os.path.join(os.path.abspath(
os.path.dirname(__file__)), 'resources')
def tearDown(self):
self.cpu = None
self.mem = None
@mock.patch.object(subprocess, 'Popen')
def test_cpu(self, mock_popen):
proc_id_stat = open(os.path.join(
self.resources, 'proc_id_stat.txt')).read()
proc_stat = open(os.path.join(self.resources, 'proc_stat.txt')).read()
mock_popen.return_value.returncode.side_effect = [0, 0, 0, 0]
side_effects = [(proc_id_stat, "Error1"), (proc_stat, "Error2"),
(proc_id_stat, "Error3"), (proc_stat, "Error4"),
(proc_id_stat, "Error5"), (proc_stat, "Error6")]
mock_popen.return_value.communicate.side_effect = side_effects
(user_util, sys_util) = self.cpu.cpu_percent()
self.cpu.cpu_percent_change()
(user_util_change, sys_util_change) = self.cpu.cpu_percent_change()
self.assertEqual(user_util_change, 0, "Percent change is zero.")
self.assertEqual(sys_util_change, 0, "Percent change is zero.")
@mock.patch.object(subprocess, 'Popen')
def test_mem(self, mock_popen):
proc_id_smaps = open(os.path.join(
self.resources, 'proc_id_smaps.txt')).read()
proc_meminfo = open(os.path.join(
self.resources, 'proc_meminfo.txt')).read()
mock_popen.return_value.returncode.side_effect = [0, 0, 0, 0]
side_effects = [(proc_id_smaps, "Error1"), (proc_meminfo, "Error2"),
(proc_id_smaps, "Error3"), (proc_meminfo, "Error4")]
mock_popen.return_value.communicate.side_effect = side_effects
(memory_used, memory_total,
free_mem, cached, cached_swap,
total_swap, free_swap) = self.mem.__stat__()
mem_percent = 100 * float(memory_used) / float(memory_total)
memory_percent = self.mem.memory_percent()
self.assertEqual(mem_percent, memory_percent)
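# Standard unittest entry point (added for convenience; not in the original file),
# so the suite can be run directly with `python test_procstat.py`.
if __name__ == "__main__":
    unittest.main()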
| 42.961538
| 78
| 0.646822
|
4a1941df424222cdcfbdb99210c202a44e7555da
| 3,044
|
py
|
Python
|
wandb/sweeps/base.py
|
nbardy/client
|
dfd6345d6a39f20252ac2107652e0f530fc67c98
|
[
"MIT"
] | 1
|
2021-05-29T10:45:46.000Z
|
2021-05-29T10:45:46.000Z
|
wandb/sweeps/base.py
|
jiaqianjing/client
|
41fa2443b9fa28682cbf4ddde85e04df6f9cd9a8
|
[
"MIT"
] | null | null | null |
wandb/sweeps/base.py
|
jiaqianjing/client
|
41fa2443b9fa28682cbf4ddde85e04df6f9cd9a8
|
[
"MIT"
] | null | null | null |
"""
Base classes to be inherited from for Search and EarlyTerminate algorithms
"""
import math
from wandb.sweeps.util import is_nan_or_nan_string
class Search():
def _metric_from_run(self, sweep_config, run, default=None):
metric = None
metric_name = sweep_config['metric']['name']
maximize = False
if 'goal' in sweep_config['metric']:
if sweep_config['metric']['goal'] == 'maximize':
maximize = True
# Use summary to find metric
if metric_name in run.summaryMetrics:
metric = run.summaryMetrics[metric_name]
# Exclude None or NaN summary metrics.
if metric is None or is_nan_or_nan_string(metric):
metric = None
if maximize and metric is not None:
metric = -metric
# Use history to find metric (if available)
metric_history = []
run_history = getattr(run, 'history', [])
for line in run_history:
m = line.get(metric_name)
if m is None:
continue
if is_nan_or_nan_string(m):
continue
metric_history.append(m)
if maximize:
metric_history = [-m for m in metric_history]
# find minimum from summary or history
if metric_history:
if metric:
metric_history.append(metric)
metric = min(metric_history)
# use default if specified
if metric is None:
if default is None:
raise ValueError(
"Couldn't find summary metric {}".format(metric_name))
metric = default
return metric
def next_run(self, sweep):
"""Called each time an agent requests new work.
Arguments:
sweep: <defined above>
Returns:
None if all work complete for this sweep. A dictionary of configuration
parameters for the next run.
"""
raise NotImplementedError
class EarlyTerminate():
def _load_metric_name_and_goal(self, sweep_config):
        if 'metric' not in sweep_config:
raise ValueError("Key 'metric' required for early termination")
self.metric_name = sweep_config['metric']['name']
self.maximize = False
if 'goal' in sweep_config['metric']:
if sweep_config['metric']['goal'] == 'maximize':
self.maximize = True
def _load_run_metric_history(self, run):
metric_history = []
for line in run.history:
if self.metric_name in line:
m = line[self.metric_name]
metric_history.append(m)
# Filter out bad values
metric_history = [
x for x in metric_history
if x is not None and not is_nan_or_nan_string(x)
]
if self.maximize:
metric_history = [-m for m in metric_history]
return metric_history
def stop_runs(self, sweep_config, runs):
return [], {}
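# --- Illustrative sketch (added; not part of the original module) ---
# A minimal, hypothetical Search subclass showing the contract documented in
# Search.next_run above: return a dict of configuration parameters for the next run
# (or None once the sweep is complete). The parameter name and values are invented.
class _RandomChoiceSearch(Search):
    def next_run(self, sweep):
        import random
        return {'learning_rate': {'value': random.choice([1e-4, 1e-3, 1e-2])}}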
| 31.061224
| 83
| 0.5818
|
4a1942021180584cf9ec1790234708320bbb23f5
| 2,679
|
py
|
Python
|
blog/views.py
|
dushyant1singh1/blog-applicaion
|
b058a89c23897918f38f4dd22650af3d65c0e3ec
|
[
"MIT"
] | 3
|
2020-01-11T13:43:04.000Z
|
2021-06-05T14:42:51.000Z
|
blog/views.py
|
dushyant1singh1/blog-applicaion
|
b058a89c23897918f38f4dd22650af3d65c0e3ec
|
[
"MIT"
] | null | null | null |
blog/views.py
|
dushyant1singh1/blog-applicaion
|
b058a89c23897918f38f4dd22650af3d65c0e3ec
|
[
"MIT"
] | 2
|
2020-01-11T13:39:33.000Z
|
2020-10-10T12:50:32.000Z
|
from django.shortcuts import render,get_object_or_404
from .models import Post
from django.core.paginator import Paginator,PageNotAnInteger
from django.views.generic import ListView
from .forms import EmailPostForm
from django.core.mail import send_mail
# Create your views here.
# def post_list(request):
# # posts=Post.objects.filter(status="published")
# object_list=Post.published.all()
# paginator=Paginator(object_list,3)#3 post in each page
# page=request.GET.get('page')
# try:
# posts=paginator.page(page)
# except PageNotAnInteger:
# #if page is not an integer deliver the first page
# posts=paginator.page(1)
# except EmptyPage:
# #if pge is out of range deliver last page of results
# posts=paginator.page(paginator.num_pages)
# return render(request
# ,'blog/post/list.html'
# ,{'posts':posts,'page':page})
def post_detail(request,year,month,day,post):
post=get_object_or_404(Post,slug=post,status='published'
,publish__year=year
,publish__month=month
,publish__day=day)
return render(request
,'blog/post/detail.html'
,{'post':post})
class PostListView(ListView):
queryset=Post.published.all()
context_object_name='posts'
paginate_by=2
template_name='blog/post/list.html'
def post_share(request,post_id):
post=get_object_or_404(Post,id=post_id,status='published')#its a shortcut to retreive the post by id
# I used same view for both displaying the initial form and processing the submitted data. we diffrentiate whether the form was submitted or not based
#on the request method we assume that if we get a GET request, an empty form has to be displayed and if we get a POST request, the
# form is submitted and needs to be processed.
sent=False
if request.method=='POST':#form was submitted
form=EmailPostForm(request.POST)
if form.is_valid():#form validation is passed
cd=form.cleaned_data
post_url=request.build_absolute_uri(post.get_absolute_url())
subject='{}({}) recommends your reading "{}"'.format(cd['name'],cd['email'],post.title)
message='Read "{}" at {}\n\n{}\'s comments: {}'.format(post.title,post_url,cd['name'],cd['comments'])
send_mail(subject,message,'admin@myblog.com',[cd['to']])
sent=True
else:
form=EmailPostForm()
return render(request,'blog/post/share.html',{'post':post,'form':form,'sent':sent})
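# --- Illustrative wiring sketch (added; not part of this module) ---
# These views are normally hooked up in a separate urls.py; the patterns and names
# below are assumptions chosen to match the view signatures above:
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('', views.PostListView.as_view(), name='post_list'),
#         path('<int:year>/<int:month>/<int:day>/<slug:post>/',
#              views.post_detail, name='post_detail'),
#         path('<int:post_id>/share/', views.post_share, name='post_share'),
#     ]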
| 45.40678
| 154
| 0.645017
|
4a1943fa2b2ef01cd5fc3b7c46878a5890f7d618
| 8,102
|
py
|
Python
|
tests/pytorch_tests/model_tests/feature_models/relu_bound_test.py
|
lapid92/model_optimization
|
3fc6db67cde912a1e22399bd43bc345ba035b8b6
|
[
"Apache-2.0"
] | null | null | null |
tests/pytorch_tests/model_tests/feature_models/relu_bound_test.py
|
lapid92/model_optimization
|
3fc6db67cde912a1e22399bd43bc345ba035b8b6
|
[
"Apache-2.0"
] | null | null | null |
tests/pytorch_tests/model_tests/feature_models/relu_bound_test.py
|
lapid92/model_optimization
|
3fc6db67cde912a1e22399bd43bc345ba035b8b6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
from model_compression_toolkit import QuantizationConfig, QuantizationErrorMethod
from model_compression_toolkit.hardware_models.default_hwm import get_default_hardware_model
from tests.pytorch_tests.fw_hw_model_pytorch import get_pytorch_test_fw_hw_model_dict
from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
from torch.nn import Conv2d, ReLU, ReLU6, Hardtanh
from torch.nn.functional import relu, relu6, hardtanh
import model_compression_toolkit as mct
import numpy as np
from model_compression_toolkit.pytorch.utils import set_model
"""
This test checks the BatchNorm folding feature, plus adding a residual connection.
"""
class ReLUBoundToPOTNet(torch.nn.Module):
def __init__(self):
super(ReLUBoundToPOTNet, self).__init__()
self.conv1 = Conv2d(3, 3, kernel_size=1, stride=1)
self.relu1 = ReLU6()
self.conv2 = Conv2d(3, 3, kernel_size=1, stride=1)
self.conv3 = Conv2d(3, 3, kernel_size=1, stride=1)
self.conv4 = Conv2d(3, 3, kernel_size=1, stride=1)
self.relu2 = ReLU()
def forward(self, inp):
x = self.conv1(inp)
x = self.relu1(x)
x = self.conv2(x)
x = self.conv3(x)
x = relu6(x)
x = self.conv4(x)
x = self.relu2(x)
x = relu(x)
return x
class HardtanhBoundToPOTNet(torch.nn.Module):
def __init__(self):
super(HardtanhBoundToPOTNet, self).__init__()
self.conv1 = Conv2d(3, 3, kernel_size=1, stride=1)
self.hardtanh1 = Hardtanh(min_val=0.0, max_val=6.0)
self.conv2 = Conv2d(3, 3, kernel_size=1, stride=1)
self.conv3 = Conv2d(3, 3, kernel_size=1, stride=1)
self.hardtanh2 = Hardtanh(min_val=-2.0, max_val=6.0)
self.conv4 = Conv2d(3, 3, kernel_size=1, stride=1)
self.conv5 = Conv2d(3, 3, kernel_size=1, stride=1)
self.conv6 = Conv2d(3, 3, kernel_size=1, stride=1)
self.conv7 = Conv2d(3, 3, kernel_size=1, stride=1)
self.hardtanh3 = Hardtanh(min_val=0.0, max_val=4.0)
self.conv8 = Conv2d(3, 3, kernel_size=1, stride=1)
self.conv9 = Conv2d(3, 3, kernel_size=1, stride=1)
def forward(self, inp):
x = self.conv1(inp)
x = self.hardtanh1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.hardtanh2(x)
x = self.conv4(x)
x = self.conv5(x)
x = hardtanh(x, min_val=0.0, max_val=6.0)
x = self.conv6(x)
x = self.conv7(x)
x = self.hardtanh3(x)
x = self.conv8(x)
x = self.conv9(x)
x = relu(x)
return x
class ReLUBoundToPOTNetTest(BasePytorchTest):
"""
This test checks the ReLU Bound To POT feature.
"""
def __init__(self, unit_test, float_reconstruction_error=1e-6):
super().__init__(unit_test, float_reconstruction_error)
def create_inputs_shape(self):
return [[self.val_batch_size, 3, 32, 32]]
def get_fw_hw_model(self):
return get_pytorch_test_fw_hw_model_dict(hardware_model=get_default_hardware_model(),
test_name='8bit_relu_bound',
fhwm_name='relu_bound_pytorch_test')
def get_quantization_configs(self):
quant_config = QuantizationConfig(QuantizationErrorMethod.MSE,
QuantizationErrorMethod.MSE,
relu_bound_to_power_of_2=True)
return {"8bit_relu_bound": quant_config}
def create_feature_network(self, input_shape):
return ReLUBoundToPOTNet()
def compare(self, quantized_models, float_model, input_x=None, quantization_info=None):
set_model(float_model)
for model_name, quantized_model in quantized_models.items():
set_model(quantized_model)
alpha_1 = (quantized_model.conv1.weight / float_model.conv1.weight).detach().cpu().numpy().mean()
beta_1 = (quantized_model.conv2.weight / float_model.conv2.weight).detach().cpu().numpy().mean()
alpha_2 = (quantized_model.conv3.weight / float_model.conv3.weight).detach().cpu().numpy().mean()
beta_2 = (quantized_model.conv4.weight / float_model.conv4.weight).detach().cpu().numpy().mean()
self.unit_test.assertTrue(np.allclose(alpha_1 * beta_1, 1, atol=1e-1))
self.unit_test.assertTrue(np.allclose(alpha_1 * 6 / 8, 1, atol=1e-1))
self.unit_test.assertTrue(np.allclose(8 / 6 * beta_1, 1, atol=1e-1))
self.unit_test.assertTrue(np.allclose(alpha_2 * beta_2, 1, atol=1e-1))
self.unit_test.assertTrue(np.allclose(alpha_2 * 6 / 8, 1, atol=1e-1))
self.unit_test.assertTrue(np.allclose(8 / 6 * beta_2, 1, atol=1e-1))
class HardtanhBoundToPOTNetTest(BasePytorchTest):
"""
This test checks the Hardtanh Bound To POT feature.
"""
def __init__(self, unit_test, float_reconstruction_error=1e-6):
super().__init__(unit_test, float_reconstruction_error)
def create_inputs_shape(self):
return [[self.val_batch_size, 3, 32, 32]]
def get_fw_hw_model(self):
return get_pytorch_test_fw_hw_model_dict(hardware_model=get_default_hardware_model(),
test_name='8bit_relu_bound',
fhwm_name='relu_bound_pytorch_test')
def get_quantization_configs(self):
quant_config = QuantizationConfig(QuantizationErrorMethod.MSE,
QuantizationErrorMethod.MSE,
relu_bound_to_power_of_2=True)
return {"8bit_relu_bound": quant_config}
def create_feature_network(self, input_shape):
return HardtanhBoundToPOTNet()
def compare(self, quantized_models, float_model, input_x=None, quantization_info=None):
set_model(float_model)
for model_name, quantized_model in quantized_models.items():
set_model(quantized_model)
alpha_1 = (quantized_model.conv1.weight / float_model.conv1.weight).detach().cpu().numpy().mean()
beta_1 = (quantized_model.conv2.weight / float_model.conv2.weight).detach().cpu().numpy().mean()
alpha_2 = (quantized_model.conv5.weight / float_model.conv5.weight).detach().cpu().numpy().mean()
beta_2 = (quantized_model.conv6.weight / float_model.conv6.weight).detach().cpu().numpy().mean()
self.unit_test.assertTrue(np.allclose(alpha_1 * beta_1, 1, atol=1e-1))
self.unit_test.assertTrue(np.allclose(alpha_1 * 6 / 8, 1, atol=1e-1))
self.unit_test.assertTrue(np.allclose(8 / 6 * beta_1, 1, atol=1e-1))
self.unit_test.assertTrue(np.allclose(alpha_2 * beta_2, 1, atol=1e-1))
self.unit_test.assertTrue(np.allclose(alpha_2 * 6 / 8, 1, atol=1e-1))
self.unit_test.assertTrue(np.allclose(8 / 6 * beta_2, 1, atol=1e-1))
self.unit_test.assertTrue(quantized_model.hardtanh2.max_val == float_model.hardtanh2.max_val)
self.unit_test.assertTrue(quantized_model.hardtanh2.min_val == float_model.hardtanh2.min_val)
self.unit_test.assertTrue(quantized_model.hardtanh3.max_val == float_model.hardtanh3.max_val)
self.unit_test.assertTrue(quantized_model.hardtanh3.min_val == float_model.hardtanh3.min_val)
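# Worked example of the scaling checked above (added explanation; not part of the
# original test): bounding ReLU6/Hardtanh(0, 6) to the next power of two (8) scales the
# preceding conv weights by alpha = 8/6 and the following conv weights by beta = 6/8,
# so alpha * beta = (8/6) * (6/8) = 1, alpha * 6/8 = 1 and 8/6 * beta = 1 -- exactly the
# three np.allclose conditions asserted for each conv pair in the compare() methods.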
| 46.297143
| 109
| 0.651321
|
4a1944404b1a267b8fb67b9746e6721b695862cb
| 7,434
|
py
|
Python
|
manim/animation/composition.py
|
dianechae/manim
|
269e5c22cb02de108579eccee2b82583eb71e3dc
|
[
"MIT"
] | 1
|
2021-10-17T15:43:51.000Z
|
2021-10-17T15:43:51.000Z
|
manim/animation/composition.py
|
dianechae/manim
|
269e5c22cb02de108579eccee2b82583eb71e3dc
|
[
"MIT"
] | null | null | null |
manim/animation/composition.py
|
dianechae/manim
|
269e5c22cb02de108579eccee2b82583eb71e3dc
|
[
"MIT"
] | null | null | null |
"""Tools for displaying multiple animations at once."""
from __future__ import annotations
from typing import TYPE_CHECKING, Callable, Optional, Sequence, Union
import numpy as np
from .._config import config
from ..animation.animation import Animation, prepare_animation
from ..mobject.mobject import Group, Mobject
from ..mobject.opengl_mobject import OpenGLGroup
from ..scene.scene import Scene
from ..utils.iterables import remove_list_redundancies
from ..utils.rate_functions import linear
if TYPE_CHECKING:
from ..mobject.types.opengl_vectorized_mobject import OpenGLVGroup
from ..mobject.types.vectorized_mobject import VGroup
__all__ = ["AnimationGroup", "Succession", "LaggedStart", "LaggedStartMap"]
DEFAULT_LAGGED_START_LAG_RATIO: float = 0.05
class AnimationGroup(Animation):
def __init__(
self,
*animations: Animation,
group: Group | VGroup | OpenGLGroup | OpenGLVGroup = None,
run_time: float | None = None,
rate_func: Callable[[float], float] = linear,
lag_ratio: float = 0,
**kwargs
) -> None:
self.animations = [prepare_animation(anim) for anim in animations]
self.group = group
if self.group is None:
mobjects = remove_list_redundancies(
[anim.mobject for anim in self.animations if not anim.is_introducer()],
)
if config["renderer"] == "opengl":
self.group = OpenGLGroup(*mobjects)
else:
self.group = Group(*mobjects)
super().__init__(self.group, rate_func=rate_func, lag_ratio=lag_ratio, **kwargs)
self.run_time: float = self.init_run_time(run_time)
def get_all_mobjects(self) -> Sequence[Mobject]:
return list(self.group)
def begin(self) -> None:
if self.suspend_mobject_updating:
self.group.suspend_updating()
for anim in self.animations:
anim.begin()
def _setup_scene(self, scene) -> None:
for anim in self.animations:
anim._setup_scene(scene)
def finish(self) -> None:
for anim in self.animations:
anim.finish()
if self.suspend_mobject_updating:
self.group.resume_updating()
def clean_up_from_scene(self, scene: Scene) -> None:
self._on_finish(scene)
for anim in self.animations:
if self.remover:
anim.remover = self.remover
anim.clean_up_from_scene(scene)
def update_mobjects(self, dt: float) -> None:
for anim in self.animations:
anim.update_mobjects(dt)
def init_run_time(self, run_time) -> float:
self.build_animations_with_timings()
if self.anims_with_timings:
self.max_end_time = np.max([awt[2] for awt in self.anims_with_timings])
else:
self.max_end_time = 0
return self.max_end_time if run_time is None else run_time
def build_animations_with_timings(self) -> None:
"""
Creates a list of triplets of the form
(anim, start_time, end_time)
"""
self.anims_with_timings = []
curr_time: float = 0
for anim in self.animations:
start_time: float = curr_time
end_time: float = start_time + anim.get_run_time()
self.anims_with_timings.append((anim, start_time, end_time))
# Start time of next animation is based on the lag_ratio
curr_time = (1 - self.lag_ratio) * start_time + self.lag_ratio * end_time
def interpolate(self, alpha: float) -> None:
# Note, if the run_time of AnimationGroup has been
# set to something other than its default, these
# times might not correspond to actual times,
# e.g. of the surrounding scene. Instead they'd
# be a rescaled version. But that's okay!
time = alpha * self.max_end_time
for anim, start_time, end_time in self.anims_with_timings:
anim_time = end_time - start_time
if anim_time == 0:
sub_alpha = 0
else:
sub_alpha = np.clip((time - start_time) / anim_time, 0, 1)
anim.interpolate(sub_alpha)
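# Worked timing example for build_animations_with_timings above (added explanation;
# not part of the original module): with three 1-second animations and lag_ratio=0.25,
# each next start is (1 - lag_ratio) * start + lag_ratio * end of the previous one:
#   anim 1: start 0.0,                          end 1.0
#   anim 2: start 0.75*0.0  + 0.25*1.0  = 0.25, end 1.25
#   anim 3: start 0.75*0.25 + 0.25*1.25 = 0.5,  end 1.5
# so max_end_time (and hence the default run_time) is 1.5.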
class Succession(AnimationGroup):
def __init__(self, *animations: Animation, lag_ratio: float = 1, **kwargs) -> None:
super().__init__(*animations, lag_ratio=lag_ratio, **kwargs)
def begin(self) -> None:
assert len(self.animations) > 0
self.update_active_animation(0)
def finish(self) -> None:
while self.active_animation is not None:
self.next_animation()
def update_mobjects(self, dt: float) -> None:
if self.active_animation:
self.active_animation.update_mobjects(dt)
def _setup_scene(self, scene) -> None:
if scene is None:
return
if self.is_introducer():
for anim in self.animations:
if not anim.is_introducer() and anim.mobject is not None:
scene.add(anim.mobject)
self.scene = scene
def update_active_animation(self, index: int) -> None:
self.active_index = index
if index >= len(self.animations):
self.active_animation: Animation | None = None
self.active_start_time: float | None = None
self.active_end_time: float | None = None
else:
self.active_animation = self.animations[index]
self.active_animation._setup_scene(self.scene)
self.active_animation.begin()
self.active_start_time = self.anims_with_timings[index][1]
self.active_end_time = self.anims_with_timings[index][2]
def next_animation(self) -> None:
if self.active_animation is not None:
self.active_animation.finish()
self.update_active_animation(self.active_index + 1)
def interpolate(self, alpha: float) -> None:
current_time = alpha * self.run_time
while self.active_end_time is not None and current_time >= self.active_end_time:
self.next_animation()
if self.active_animation is not None and self.active_start_time is not None:
elapsed = current_time - self.active_start_time
active_run_time = self.active_animation.get_run_time()
subalpha = elapsed / active_run_time if active_run_time != 0.0 else 1.0
self.active_animation.interpolate(subalpha)
class LaggedStart(AnimationGroup):
def __init__(
self,
*animations: Animation,
lag_ratio: float = DEFAULT_LAGGED_START_LAG_RATIO,
**kwargs
):
super().__init__(*animations, lag_ratio=lag_ratio, **kwargs)
class LaggedStartMap(LaggedStart):
def __init__(
self,
AnimationClass: Callable[..., Animation],
mobject: Mobject,
arg_creator: Callable[[Mobject], str] = None,
run_time: float = 2,
**kwargs
) -> None:
args_list = []
for submob in mobject:
if arg_creator:
args_list.append(arg_creator(submob))
else:
args_list.append((submob,))
anim_kwargs = dict(kwargs)
if "lag_ratio" in anim_kwargs:
anim_kwargs.pop("lag_ratio")
animations = [AnimationClass(*args, **anim_kwargs) for args in args_list]
super().__init__(*animations, run_time=run_time, **kwargs)
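# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal scene using the composition helpers defined above; the shapes, shift and
# lag values are arbitrary, and the top-level `manim` names are assumed importable.
if __name__ == "__main__":
    from manim import Circle, Square, Triangle, FadeIn, UP
    class _CompositionDemo(Scene):
        def construct(self):
            shapes = [Circle(), Square(), Triangle()]
            # Overlapping fade-ins: each starts when the previous one is 30% done.
            self.play(LaggedStart(*[FadeIn(s) for s in shapes], lag_ratio=0.3))
            # Strictly sequential shifts (Succession defaults to lag_ratio=1).
            self.play(Succession(*[s.animate.shift(UP) for s in shapes]))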
| 36.441176
| 88
| 0.635055
|
4a1944add4d7dce784b76d80cb59fef1ea8a740c
| 2,476
|
py
|
Python
|
azure-mgmt-datafactory/azure/mgmt/datafactory/models/azure_my_sql_linked_service.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 4
|
2016-06-17T23:25:29.000Z
|
2022-03-30T22:37:45.000Z
|
azure-mgmt-datafactory/azure/mgmt/datafactory/models/azure_my_sql_linked_service.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 2
|
2016-09-30T21:40:24.000Z
|
2017-11-10T18:16:18.000Z
|
azure-mgmt-datafactory/azure/mgmt/datafactory/models/azure_my_sql_linked_service.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | 3
|
2016-05-03T20:49:46.000Z
|
2017-10-05T21:05:27.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .linked_service import LinkedService
class AzureMySqlLinkedService(LinkedService):
"""Azure MySQL database linked service.
:param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param connect_via: The integration runtime reference.
:type connect_via:
~azure.mgmt.datafactory.models.IntegrationRuntimeReference
:param description: Linked service description.
:type description: str
:param type: Constant filled by server.
:type type: str
:param connection_string: The connection string.
:type connection_string: ~azure.mgmt.datafactory.models.SecureString
:param encrypted_credential: The encrypted credential used for
authentication. Credentials are encrypted using the integration runtime
credential manager. Type: string (or Expression with resultType string).
:type encrypted_credential: object
"""
_validation = {
'type': {'required': True},
'connection_string': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'connect_via': {'key': 'connectVia', 'type': 'IntegrationRuntimeReference'},
'description': {'key': 'description', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'connection_string': {'key': 'typeProperties.connectionString', 'type': 'SecureString'},
'encrypted_credential': {'key': 'typeProperties.encryptedCredential', 'type': 'object'},
}
def __init__(self, connection_string, additional_properties=None, connect_via=None, description=None, encrypted_credential=None):
super(AzureMySqlLinkedService, self).__init__(additional_properties=additional_properties, connect_via=connect_via, description=description)
self.connection_string = connection_string
self.encrypted_credential = encrypted_credential
self.type = 'AzureMySql'
| 45.018182
| 148
| 0.67811
|
4a19467cecf4f9c0c61b49f0dafb13d7994bed58
| 203
|
py
|
Python
|
jifx/make_geo_ids.py
|
zaza81/clavin
|
7a1f0c9745736869b5040c66785f080acc755a9c
|
[
"Apache-2.0"
] | null | null | null |
jifx/make_geo_ids.py
|
zaza81/clavin
|
7a1f0c9745736869b5040c66785f080acc755a9c
|
[
"Apache-2.0"
] | null | null | null |
jifx/make_geo_ids.py
|
zaza81/clavin
|
7a1f0c9745736869b5040c66785f080acc755a9c
|
[
"Apache-2.0"
] | null | null | null |
import sys
f = open("GeocodedTweets.tsv")
f.readline()
print "id\tUnknown1\tUnknown2\tUserName\tTweet\tGeocoded\tLat\tLong\tDateTime"
i = 0
for line in f:
    i += 1
    print(str(i) + "\t" + line, end="")
| 14.5
| 78
| 0.665025
|
4a1946b9c6b713b1c62d68e921241dadd64a22e2
| 838
|
py
|
Python
|
docker/nginx/start.py
|
nabladev/ohmyform
|
b9180596905c0e423ba4fef0789b810ded56c771
|
[
"MIT"
] | 2,092
|
2017-04-24T10:37:44.000Z
|
2022-03-30T13:44:39.000Z
|
docker/nginx/start.py
|
nabladev/ohmyform
|
b9180596905c0e423ba4fef0789b810ded56c771
|
[
"MIT"
] | 146
|
2017-04-23T17:55:31.000Z
|
2019-10-09T14:00:12.000Z
|
docker/nginx/start.py
|
nabladev/ohmyform
|
b9180596905c0e423ba4fef0789b810ded56c771
|
[
"MIT"
] | 503
|
2017-04-23T12:21:43.000Z
|
2022-03-30T15:07:43.000Z
|
#!/usr/bin/python
import os
import subprocess
#Set default port
if not os.environ["PORT"]:
os.environ["PORT"] = "5000"
#Set default sockets port
if not os.environ["SOCKET_PORT"]:
os.environ["SOCKET_PORT"] = "20523"
# Actual startup script
if not os.path.exists("/certs/dhparam.pem") and os.environ["TLS_FLAVOR"] != "notls":
os.system("openssl dhparam -out /certs/dhparam.pem 2048")
if os.environ["TLS_FLAVOR"] == "letsencrypt":
subprocess.Popen(["/letsencrypt.py"])
elif os.environ["TLS_FLAVOR"] == "cert":
if not os.path.exists("/certs/cert.pem"):
os.system("openssl req -newkey rsa:2048 -x509 -keyout /certs/key.pem -out /certs/cert.pem -days 365 -nodes -subj '/C=NA/ST=None/L=None/O=None/CN=" + os.environ["BASE_URL"] + "'")
subprocess.call(["/config.py"])
os.execv("/usr/sbin/nginx", ["nginx", "-g", "daemon off;"])
| 33.52
| 180
| 0.681384
|
4a19488b7518e11550f4c9050ea8bb16561262f8
| 379
|
py
|
Python
|
src/time_functions.py
|
joseph-mccarthy/night-sky-pi
|
045b54c8ae8d551e951cd35bc40a94d3a4171975
|
[
"MIT"
] | null | null | null |
src/time_functions.py
|
joseph-mccarthy/night-sky-pi
|
045b54c8ae8d551e951cd35bc40a94d3a4171975
|
[
"MIT"
] | null | null | null |
src/time_functions.py
|
joseph-mccarthy/night-sky-pi
|
045b54c8ae8d551e951cd35bc40a94d3a4171975
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
def _get_now() -> datetime:
return datetime.now()
def get_today() -> datetime:
return _get_now()
def get_tomorrow() -> datetime:
return _get_now() + timedelta(1)
def get_yesterday() -> datetime:
return _get_now() - timedelta(1)
def get_dates() -> tuple:
return (get_yesterday(), get_today(), get_tomorrow())
| 18.95
| 57
| 0.6781
|
4a1948b00b976be11cf32a3ed896cfdd0620f4c0
| 4,652
|
py
|
Python
|
tests/distributed/test_workspaces/test_remote_workspaces.py
|
manognyaa/jina
|
14cbb0dd1d5d7493f12cf4b8fb4066626f17c2e1
|
[
"Apache-2.0"
] | null | null | null |
tests/distributed/test_workspaces/test_remote_workspaces.py
|
manognyaa/jina
|
14cbb0dd1d5d7493f12cf4b8fb4066626f17c2e1
|
[
"Apache-2.0"
] | null | null | null |
tests/distributed/test_workspaces/test_remote_workspaces.py
|
manognyaa/jina
|
14cbb0dd1d5d7493f12cf4b8fb4066626f17c2e1
|
[
"Apache-2.0"
] | null | null | null |
import os
import time
import numpy as np
import pytest
from jina import Flow, Client, Document, __default_host__
from ..helpers import create_workspace, wait_for_workspace, delete_workspace
cur_dir = os.path.dirname(os.path.abspath(__file__))
compose_yml = os.path.join(cur_dir, 'docker-compose.yml')
"""
Run the commands below for local tests
docker build --build-arg PIP_TAG=daemon -f Dockerfiles/debianx.Dockerfile -t jinaai/jina:test-daemon .
docker run --add-host host.docker.internal:host-gateway \
--name jinad -v /var/run/docker.sock:/var/run/docker.sock -v /tmp/jinad:/tmp/jinad \
-p 8000:8000 -d jinaai/jina:test-daemon
"""
CLOUD_HOST = 'localhost:8000' # consider it as the staged version
NUM_DOCS = 100
@pytest.mark.parametrize('parallels', [1])
def test_upload_simple(parallels, mocker):
response_mock = mocker.Mock()
f = (
Flow()
.add()
.add(
uses='mwu_encoder.yml',
host=CLOUD_HOST,
parallel=parallels,
upload_files=['mwu_encoder.py'],
)
.add()
)
with f:
f.index(
inputs=(Document(blob=np.random.random([1, 100])) for _ in range(NUM_DOCS)),
on_done=response_mock,
)
response_mock.assert_called()
@pytest.mark.parametrize('parallels', [2])
def test_upload_multiple_workspaces(parallels, mocker):
response_mock = mocker.Mock()
encoder_workspace = 'tf_encoder_ws'
indexer_workspace = 'tdb_indexer_ws'
def _path(dir, filename):
return os.path.join(cur_dir, dir, filename)
f = (
Flow()
.add(
name='tf_encoder',
uses=_path(encoder_workspace, 'tf.yml'),
host=CLOUD_HOST,
parallel=parallels,
py_modules=[_path(encoder_workspace, 'tf_encoder.py')],
upload_files=[
_path(encoder_workspace, '.jinad'),
_path(encoder_workspace, 'requirements.txt'),
],
)
.add(
name='tdb_indexer',
uses=_path(indexer_workspace, 'tdb.yml'),
host=CLOUD_HOST,
parallel=parallels,
py_modules=[_path(indexer_workspace, 'tdb_indexer.py')],
upload_files=[
_path(indexer_workspace, '.jinad'),
_path(indexer_workspace, 'requirements.txt'),
],
)
)
with f:
f.index(
inputs=(Document(blob=np.random.random([1, 100])) for _ in range(NUM_DOCS)),
on_done=response_mock,
)
response_mock.assert_called()
def test_custom_project():
HOST = __default_host__
workspace_id = create_workspace(
dirpath=os.path.join(cur_dir, 'flow_app_ws'), host=HOST
)
assert wait_for_workspace(workspace_id, host=HOST)
# we need to wait for the flow to start in the custom project
time.sleep(5)
def gen_docs():
import string
d = iter(string.ascii_lowercase)
while True:
try:
yield Document(tags={'first': next(d), 'second': next(d)})
except StopIteration:
return
Client(host=HOST, port_expose=42860, show_progress=True).post(
on='/index', inputs=gen_docs
)
res = Client(host=HOST, port_expose=42860, show_progress=True).post(
on='/search',
inputs=Document(tags={'key': 'first', 'value': 's'}),
return_results=True,
)
assert res[0].data.docs[0].matches[0].tags.fields['first'].string_value == 's'
assert res[0].data.docs[0].matches[0].tags.fields['second'].string_value == 't'
delete_workspace(workspace_id, host=HOST)
@pytest.fixture()
def docker_compose(request):
os.system(f'docker network prune -f ')
os.system(
f'docker-compose -f {request.param} --project-directory . up --build -d --remove-orphans'
)
time.sleep(5)
yield
os.system(
f'docker-compose -f {request.param} --project-directory . down --remove-orphans'
)
os.system(f'docker network prune -f ')
@pytest.mark.parametrize('docker_compose', [compose_yml], indirect=['docker_compose'])
def test_upload_simple_non_standard_rootworkspace(docker_compose, mocker):
response_mock = mocker.Mock()
f = (
Flow()
.add()
.add(
uses='mwu_encoder.yml',
host='localhost:9000',
upload_files=['mwu_encoder.py'],
)
.add()
)
with f:
f.index(
inputs=(Document(blob=np.random.random([1, 100])) for _ in range(NUM_DOCS)),
on_done=response_mock,
)
response_mock.assert_called()
| 29.820513
| 102
| 0.610275
|
4a1948bc3a2fa9e966660a404195ba7975bb34d2
| 9,056
|
py
|
Python
|
bokeh/core/properties.py
|
RunOrVeith/bokeh
|
a29eda007c886ec0cb21628038204f08cfe6b4c1
|
[
"BSD-3-Clause"
] | null | null | null |
bokeh/core/properties.py
|
RunOrVeith/bokeh
|
a29eda007c886ec0cb21628038204f08cfe6b4c1
|
[
"BSD-3-Clause"
] | 4
|
2021-03-18T22:30:03.000Z
|
2022-02-12T06:12:28.000Z
|
bokeh/core/properties.py
|
RunOrVeith/bokeh
|
a29eda007c886ec0cb21628038204f08cfe6b4c1
|
[
"BSD-3-Clause"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide property types for Bokeh models
Properties are objects that can be assigned as class attributes on Bokeh
models, to provide automatic serialization, validation, and documentation.
This documentation is broken down into the following sections:
.. contents::
:local:
Overview
--------
There are many property types defined in the module, for example ``Int`` to
represent integral values, ``Seq`` to represent sequences (e.g. lists or
tuples, etc.). Properties can also be combined: ``Seq(Float)`` represents
a sequence of floating point values.
For example, the following defines a model that has integer, string, and
list[float] properties:
.. code-block:: python
class SomeModel(Model):
foo = Int
bar = String(default="something")
baz = List(Float, help="docs for baz prop")
As seen, properties can be declared as just the property type, e.g.
``foo = Int``, in which case the properties are automatically instantiated
on new Model objects. Or the property can be instantiated on the class,
and configured with default values and help strings.
The properties of this class can be initialized by specifying keyword
arguments to the initializer:
.. code-block:: python
m = SomeModel(foo=10, bar="a str", baz=[1,2,3,4])
But also by setting the attributes on an instance:
.. code-block:: python
m.foo = 20
Attempts to set a property to a value of the wrong type will
result in a ``ValueError`` exception:
.. code-block:: python
>>> m.foo = 2.3
Traceback (most recent call last):
<< traceback omitted >>
ValueError: expected a value of type Integral, got 2.3 of type float
Models with properties know how to serialize themselves, to be understood
by BokehJS. Additionally, any help strings provided on properties can be
easily and automatically extracted with the Sphinx extensions in the
:ref:`bokeh.sphinxext` module.
Basic Properties
----------------
.. autoclass:: Angle
.. autoclass:: Any
.. autoclass:: AnyRef
.. autoclass:: Auto
.. autoclass:: Bool
.. autoclass:: Byte
.. autoclass:: Color
.. autoclass:: Complex
.. autoclass:: DashPattern
.. autoclass:: Date
.. autoclass:: Datetime
.. autoclass:: Either
.. autoclass:: Enum
.. autoclass:: Float
.. autoclass:: FontSize
.. autoclass:: Image
.. autoclass:: Instance
.. autoclass:: Int
.. autoclass:: Interval
.. autoclass:: JSON
.. autoclass:: MarkerType
.. autoclass:: MinMaxBounds
.. autoclass:: Percent
.. autoclass:: RGB
.. autoclass:: Regex
.. autoclass:: Size
.. autoclass:: String
.. autoclass:: TimeDelta
Container Properties
--------------------
.. autoclass:: Array
.. autoclass:: ColumnData
.. autoclass:: Dict
.. autoclass:: List
.. autoclass:: RelativeDelta
.. autoclass:: Seq
.. autoclass:: Tuple
DataSpec Properties
-------------------
.. autoclass:: AngleSpec
.. autoclass:: ColorSpec
.. autoclass:: DataDistanceSpec
.. autoclass:: DataSpec
.. autoclass:: DistanceSpec
.. autoclass:: FontSizeSpec
.. autoclass:: MarkerSpec
.. autoclass:: NumberSpec
.. autoclass:: ScreenDistanceSpec
.. autoclass:: StringSpec
.. autoclass:: UnitsSpec
Helpers
~~~~~~~
.. autofunction:: expr
.. autofunction:: field
.. autofunction:: value
Special Properties
------------------
.. autoclass:: Include
.. autoclass:: Override
Validation Control
------------------
By default, Bokeh properties perform type validation on values. This helps to
ensure the consistency of any data exchanged between Python and JavaScript, as
well as provide detailed and immediate feedback to users if they attempt to
set values of the wrong type. However, these type checks incur some overhead.
In some cases it may be desirable to turn off validation in specific places,
or even entirely, in order to boost performance. The following API is available
to control when type validation occurs.
.. autoclass:: validate
.. autofunction:: without_property_validation
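For example, validation can be suspended around a block of assignments that
are already known to be valid, or turned off for a frequently-called update
function (a minimal sketch, reusing the ``SomeModel`` instance ``m`` from
the Overview section above):
.. code-block:: python
    # skip type checks for a batch of trusted assignments
    with validate(False):
        m.foo = 20
        m.baz = [5.0, 6.0, 7.0]
    # or disable validation for an entire callback
    @without_property_validation
    def update():
        m.foo = m.foo + 1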
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Angle',
'AngleSpec',
'Any',
'AnyRef',
'Array',
'Auto',
'Bool',
'Byte',
'Color',
'ColorSpec',
'ColumnData',
'Complex',
'DashPattern',
'DataDistanceSpec',
'DataSpec',
'Date',
'Datetime',
'Dict',
'DistanceSpec',
'Either',
'Enum',
'Float',
'FontSize',
'FontSizeSpec',
'Image',
'Include',
'Instance',
'Int',
'Interval',
'JSON',
'List',
'MarkerSpec',
'MarkerType',
'MinMaxBounds',
'NumberSpec',
'Override',
'Percent',
'RGB',
'Regex',
'RelativeDelta',
'ScreenDistanceSpec',
'Seq',
'Size',
'String',
'StringSpec',
'TimeDelta',
'Tuple',
'UnitsSpec',
'expr',
'field',
'validate',
'value',
'without_property_validation'
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
from .property.any import Any; Any
from .property.any import AnyRef; AnyRef
from .property.auto import Auto; Auto
from .property.color import Color; Color
from .property.color import RGB; RGB
from .property.container import Array; Array
from .property.container import ColumnData; ColumnData
from .property.container import Dict; Dict
from .property.container import List; List
from .property.container import Seq; Seq
from .property.container import Tuple; Tuple
from .property.container import RelativeDelta; RelativeDelta
from .property.dataspec import AngleSpec; AngleSpec
from .property.dataspec import ColorSpec; ColorSpec
from .property.dataspec import DataSpec; DataSpec
from .property.dataspec import DataDistanceSpec; DataDistanceSpec
from .property.dataspec import DistanceSpec; DistanceSpec
from .property.dataspec import expr; expr
from .property.dataspec import field; field
from .property.dataspec import FontSizeSpec; FontSizeSpec
from .property.dataspec import MarkerSpec; MarkerSpec
from .property.dataspec import NumberSpec; NumberSpec
from .property.dataspec import ScreenDistanceSpec; ScreenDistanceSpec
from .property.dataspec import StringSpec; StringSpec
from .property.dataspec import UnitsSpec; UnitsSpec
from .property.dataspec import value; value
from .property.datetime import Date; Date
from .property.datetime import Datetime; Datetime
from .property.datetime import TimeDelta; TimeDelta
from .property.either import Either; Either
from .property.enum import Enum; Enum
from .property.include import Include ; Include
from .property.instance import Instance; Instance
from .property.json import JSON; JSON
from .property.numeric import Angle; Angle
from .property.numeric import Byte; Byte
from .property.numeric import Interval; Interval
from .property.numeric import Percent; Percent
from .property.numeric import Size; Size
from .property.override import Override ; Override
from .property.primitive import Bool; Bool
from .property.primitive import Complex; Complex
from .property.primitive import Int; Int
from .property.primitive import Float; Float
from .property.primitive import String; String
from .property.regex import Regex; Regex
from .property.visual import DashPattern; DashPattern
from .property.visual import FontSize; FontSize
from .property.visual import Image; Image
from .property.visual import MinMaxBounds; MinMaxBounds
from .property.visual import MarkerType; MarkerType
from .property.validation import validate; validate
from .property.validation import without_property_validation; without_property_validation
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 28.3
| 89
| 0.629638
|
4a1948d3d05295f67598c31ac17557038e4c764d
| 38,762
|
py
|
Python
|
built_in_tasks/onedim_lfp_tasks.py
|
DerekYJC/bmi_python
|
7b9cf3f294a33688db24b0863c1035e9cc6999ea
|
[
"Apache-2.0"
] | null | null | null |
built_in_tasks/onedim_lfp_tasks.py
|
DerekYJC/bmi_python
|
7b9cf3f294a33688db24b0863c1035e9cc6999ea
|
[
"Apache-2.0"
] | null | null | null |
built_in_tasks/onedim_lfp_tasks.py
|
DerekYJC/bmi_python
|
7b9cf3f294a33688db24b0863c1035e9cc6999ea
|
[
"Apache-2.0"
] | null | null | null |
'''
Base tasks for generic point-to-point reaching
'''
from __future__ import division
import numpy as np
from collections import OrderedDict
import time
from riglib import reward
from riglib.experiment import traits, Sequence
from riglib.stereo_opengl.window import Window, FPScontrol, WindowDispl2D
from riglib.stereo_opengl.primitives import Cylinder, Plane, Sphere, Cube
from riglib.stereo_opengl.models import FlatMesh, Group
from riglib.stereo_opengl.textures import Texture, TexModel
from riglib.stereo_opengl.render import stereo, Renderer
from riglib.stereo_opengl.utils import cloudy_tex
from .plantlist import plantlist
from riglib.stereo_opengl import ik
import math
import traceback
from riglib.bmi import bmi
#from built_in_tasks.bmimultitasks import BMILoop
from riglib.bmi.bmi import BMILoop
from built_in_tasks.manualcontrolmultitasks import ManualControlMulti, VirtualCircularTarget
from riglib.bmi import onedim_lfp_decoder
####### CONSTANTS
sec_per_min = 60.0
RED = (1,0,0,.5)
GREEN = (0,1,0,0.5)
mm_per_cm = 1./10
class SquareTarget(object):
def __init__(self, target_radius=2, target_color=(1, 0, 0, .5), starting_pos=np.zeros(3)):
self.target_rad = target_radius
self.target_color = target_color
self.position = starting_pos
self.int_position = starting_pos
self._pickle_init()
def _pickle_init(self):
self.cube = Cube(side_len=self.target_rad, color=self.target_color)
self.graphics_models = [self.cube]
self.cube.translate(*self.position)
def move_to_position(self, new_pos):
self.int_position = new_pos
self.drive_to_new_pos()
def drive_to_new_pos(self):
raise NotImplementedError
class VirtualSquareTarget(SquareTarget):
def drive_to_new_pos(self):
self.position = self.int_position
self.cube.translate(*self.position, reset=True)
def hide(self):
self.cube.detach()
def show(self):
self.cube.attach()
def cue_trial_start(self):
self.cube.color = self.target_color
self.show()
def cue_trial_end_success(self):
self.cube.color = GREEN
def cue_trial_end_failure(self):
self.cube.color = RED
self.hide()
# self.sphere.color = GREEN
def idle(self):
self.cube.color = self.target_color
self.hide()
class LFP_Mod(BMILoop, Sequence, Window):
background = (0,0,0,1)
plant_visible = traits.Bool(True, desc='Specifies whether entire plant is displayed or just endpoint')
lfp_cursor_rad = traits.Float(.5, desc="length of LFP cursor")
lfp_cursor_color = (.5,0,.5,.75)
lfp_plant_type_options = list(plantlist.keys())
lfp_plant_type = traits.OptionsList(*plantlist, bmi3d_input_options=list(plantlist.keys()))
window_size = traits.Tuple((1920*2, 1080), desc='window size')
lfp_frac_lims = traits.Tuple((0., 0.35), desc='fraction limits')
xlfp_frac_lims = traits.Tuple((-.7, 1.7), desc = 'x dir fraction limits')
lfp_control_band = traits.Tuple((25, 40), desc='beta power band limits')
lfp_totalpw_band = traits.Tuple((1, 100), desc='total power band limits')
xlfp_control_band = traits.Tuple((0, 5), desc = 'x direction band limits')
n_steps = traits.Int(2, desc='moving average for decoder')
is_bmi_seed = True
powercap = traits.Float(1, desc="Timeout for total power above this")
zboundaries=(-12,12)
status = dict(
wait = dict(start_trial="lfp_target", stop=None),
lfp_target = dict(enter_lfp_target="lfp_hold", powercap_penalty="powercap_penalty", stop=None),
lfp_hold = dict(leave_early="lfp_target", lfp_hold_complete="reward", powercap_penalty="powercap_penalty"),
powercap_penalty = dict(powercap_penalty_end="lfp_target"),
reward = dict(reward_end="wait")
)
static_states = [] # states in which the decoder is not run
trial_end_states = ['reward']
lfp_cursor_on = ['lfp_target', 'lfp_hold']
#initial state
state = "wait"
#create settable traits
reward_time = traits.Float(.5, desc="Length of juice reward")
lfp_target_rad = traits.Float(3.6, desc="Length of targets in cm")
lfp_hold_time = traits.Float(.2, desc="Length of hold required at lfp targets")
lfp_hold_var = traits.Float(.05, desc="Length of hold variance required at lfp targets")
hold_penalty_time = traits.Float(1, desc="Length of penalty time for target hold error")
    powercap_penalty_time = traits.Float(1, desc="Length of penalty time for power cap error")
# max_attempts = traits.Int(10, desc='The number of attempts at a target before\
# skipping to the next one')
session_length = traits.Float(0, desc="Time until task automatically stops. Length of 0 means no auto stop.")
#plant_hide_rate = traits.Float(0.0, desc='If the plant is visible, specifies a percentage of trials where it will be hidden')
lfp_target_color = (123/256.,22/256.,201/256.,.5)
mc_target_color = (1,0,0,.5)
target_index = -1 # Helper variable to keep track of which target to display within a trial
#tries = 0 # Helper variable to keep track of the number of failed attempts at a given trial.
cursor_visible = False # Determines when to hide the cursor.
no_data_count = 0 # Counter for number of missing data frames in a row
sequence_generators = ['lfp_mod_4targ']
def __init__(self, *args, **kwargs):
super(LFP_Mod, self).__init__(*args, **kwargs)
self.cursor_visible = True
print('INIT FRAC LIMS: ', self.lfp_frac_lims)
dec_params = dict(lfp_frac_lims = self.lfp_frac_lims,
xlfp_frac_lims = self.xlfp_frac_lims,
powercap = self.powercap,
zboundaries = self.zboundaries,
lfp_control_band = self.lfp_control_band,
lfp_totalpw_band = self.lfp_totalpw_band,
xlfp_control_band = self.xlfp_control_band,
n_steps = self.n_steps)
self.decoder.filt.init_from_task(**dec_params)
self.decoder.init_from_task(**dec_params)
self.lfp_plant = plantlist[self.lfp_plant_type]
if self.lfp_plant_type == 'inv_cursor_onedimLFP':
print('MAKE SURE INVERSE GENERATOR IS ON')
self.plant_vis_prev = True
self.current_assist_level = 0
self.learn_flag = False
if hasattr(self.lfp_plant, 'graphics_models'):
for model in self.lfp_plant.graphics_models:
self.add_model(model)
# Instantiate the targets
'''
height and width on kinarm machine are 2.4. Here we make it 2.4/8*12 = 3.6
'''
lfp_target = VirtualSquareTarget(target_radius=self.lfp_target_rad, target_color=self.lfp_target_color)
self.targets = [lfp_target]
# Initialize target location variable
self.target_location_lfp = np.array([-100, -100, -100])
# Declare any plant attributes which must be saved to the HDF file at the _cycle rate
for attr in self.lfp_plant.hdf_attrs:
self.add_dtype(*attr)
def init(self):
self.plant = DummyPlant()
self.add_dtype('lfp_target', 'f8', (3,))
self.add_dtype('target_index', 'i', (1,))
self.add_dtype('powercap_flag', 'i',(1,))
for target in self.targets:
for model in target.graphics_models:
self.add_model(model)
super(LFP_Mod, self).init()
def _cycle(self):
'''
Calls any update functions necessary and redraws screen. Runs 60x per second.
'''
self.task_data['loop_time'] = self.iter_time()
self.task_data['lfp_target'] = self.target_location_lfp.copy()
self.task_data['target_index'] = self.target_index
#self.task_data['internal_decoder_state'] = self.decoder.filt.current_lfp_pos
self.task_data['powercap_flag'] = self.decoder.filt.current_powercap_flag
self.move_plant()
## Save plant status to HDF file, ###ADD BACK
lfp_plant_data = self.lfp_plant.get_data_to_save()
for key in lfp_plant_data:
self.task_data[key] = lfp_plant_data[key]
super(LFP_Mod, self)._cycle()
def move_plant(self):
feature_data = self.get_features()
# Save the "neural features" (e.g. spike counts vector) to HDF file
for key, val in feature_data.items():
self.task_data[key] = val
Bu = None
assist_weight = 0
target_state = np.zeros([self.decoder.n_states, self.decoder.n_subbins])
## Run the decoder
if self.state not in self.static_states:
neural_features = feature_data[self.extractor.feature_type]
self.call_decoder(neural_features, target_state, Bu=Bu, assist_level=assist_weight, feature_type=self.extractor.feature_type)
## Drive the plant to the decoded state, if permitted by the constraints of the plant
self.lfp_plant.drive(self.decoder)
self.task_data['decoder_state'] = decoder_state = self.decoder.get_state(shape=(-1,1))
return decoder_state
def run(self):
'''
See experiment.Experiment.run for documentation.
'''
# Fire up the plant. For virtual/simulation plants, this does little/nothing.
self.lfp_plant.start()
try:
super(LFP_Mod, self).run()
finally:
self.lfp_plant.stop()
##### HELPER AND UPDATE FUNCTIONS ####
def update_cursor_visibility(self):
''' Update cursor visible flag to hide cursor if there has been no good data for more than 3 frames in a row'''
prev = self.cursor_visible
if self.no_data_count < 3:
self.cursor_visible = True
if prev != self.cursor_visible:
self.show_object(self.cursor, show=True)
else:
self.cursor_visible = False
if prev != self.cursor_visible:
self.show_object(self.cursor, show=False)
def update_report_stats(self):
'''
see experiment.Experiment.update_report_stats for docs
'''
super(LFP_Mod, self).update_report_stats()
self.reportstats['Trial #'] = self.calc_trial_num()
self.reportstats['Reward/min'] = np.round(self.calc_events_per_min('reward', 120), decimals=2)
#### TEST FUNCTIONS ####
def _test_powercap_penalty(self, ts):
if self.decoder.filt.current_powercap_flag:
#Turn off power cap flag:
self.decoder.filt.current_powercap_flag = 0
return True
else:
return False
def _test_enter_lfp_target(self, ts):
'''
return true if the distance between center of cursor and target is smaller than the cursor radius in the x and z axis only
'''
cursor_pos = self.lfp_plant.get_endpoint_pos()
dx = np.linalg.norm(cursor_pos[0] - self.target_location_lfp[0])
dz = np.linalg.norm(cursor_pos[2] - self.target_location_lfp[2])
in_targ = False
if dx<= (self.lfp_target_rad/2.) and dz<= (self.lfp_target_rad/2.):
in_targ = True
return in_targ
# #return d <= (self.lfp_target_rad - self.lfp_cursor_rad)
# #If center of cursor enters target at all:
# return d <= (self.lfp_target_rad/2.)
# #New version:
# cursor_pos = self.lfp_plant.get_endpoint_pos()
# d = np.linalg.norm(cursor_pos[2] - self.target_location_lfp[2])
# d <= (self.lfp_target_rad - self.lfp_cursor_rad)
def _test_leave_early(self, ts):
'''
return true if cursor moves outside the exit radius
'''
cursor_pos = self.lfp_plant.get_endpoint_pos()
dx = np.linalg.norm(cursor_pos[0] - self.target_location_lfp[0])
dz = np.linalg.norm(cursor_pos[2] - self.target_location_lfp[2])
out_of_targ = False
if dx > (self.lfp_target_rad/2.) or dz > (self.lfp_target_rad/2.):
out_of_targ = True
#rad = self.lfp_target_rad - self.lfp_cursor_rad
#return d > rad
return out_of_targ
def _test_lfp_hold_complete(self, ts):
return ts>=self.lfp_hold_time_plus_var
# def _test_lfp_timeout(self, ts):
# return ts>self.timeout_time
def _test_powercap_penalty_end(self, ts):
if ts>self.powercap_penalty_time:
self.lfp_plant.turn_on()
return ts>self.powercap_penalty_time
def _test_reward_end(self, ts):
return ts>self.reward_time
def _test_stop(self, ts):
if self.session_length > 0 and (self.get_time() - self.task_start_time) > self.session_length:
self.end_task()
return self.stop
#### STATE FUNCTIONS ####
def _parse_next_trial(self):
self.targs = self.next_trial
def _start_wait(self):
super(LFP_Mod, self)._start_wait()
self.tries = 0
self.target_index = -1
#hide targets
for target in self.targets:
target.hide()
#get target locations for this trial
self._parse_next_trial()
self.chain_length = 1
self.lfp_hold_time_plus_var = self.lfp_hold_time + np.random.uniform(low=-1,high=1)*self.lfp_hold_var
def _start_lfp_target(self):
self.target_index += 1
self.target_index = 0
#only 1 target:
target = self.targets[0]
self.target_location_lfp = self.targs #Just one target.
target.move_to_position(self.target_location_lfp)
target.cue_trial_start()
def _start_lfp_hold(self):
#make next target visible unless this is the final target in the trial
idx = (self.target_index + 1)
if idx < self.chain_length:
target = self.targets[idx % 2]
target.move_to_position(self.targs[idx])
def _end_lfp_hold(self):
# change current target color to green
self.targets[self.target_index % 2].cue_trial_end_success()
def _start_timeout_penalty(self):
#hide targets
for target in self.targets:
target.hide()
self.tries += 1
self.target_index = -1
def _start_reward(self):
#super(LFP_Mod, self)._start_reward()
self.targets[self.target_index % 2].show()
def _start_powercap_penalty(self):
for target in self.targets:
target.hide()
self.lfp_plant.turn_off()
@staticmethod
def lfp_mod_4targ(nblocks=100, boundaries=(-18,18,-12,12), xaxis=-8):
'''Mimics beta modulation task from Kinarm Rig:
In Kinarm rig, the following linear transformations happen:
1. LFP cursor is calculated
2. mapped from fraction limits [0, .35] to [-1, 1] (unit_coordinates)
3. udp sent to kinarm machine and multiplied by 8
4. translated upward in the Y direction by + 2.5
This means, our targets which are at -8, [-0.75, 2.5, 5.75, 9.0]
must be translated down by 2.5 to: -8, [-3.25, 0. , 3.25, 6.5]
then divided by 8: -1, [-0.40625, 0. , 0.40625, 0.8125 ] in unit_coordinates
The radius is 1.2, which is 0.15 in unit_coordinates
Now, we map this to a new system:
- new_zero: (y1+y2) / 2
- new_scale: (y2 - y1) / 2
(([-0.40625, 0. , 0.40625, 0.8125 ]) * new_scale ) + new_zero
new_zero = 0
new_scale = 12
12 * [-0.40625, 0. , 0.40625, 0.8125 ]
= array([-4.875, 0. , 4.875, 9.75 ])
'''
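        # With the default boundaries (-18, 18, -12, 12): new_zero = 0 and new_scale = 12,
        # so the four LFP target heights are [-4.875, 0., 4.875, 9.75], all placed at the
        # fixed x position given by `xaxis`.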
new_zero = (boundaries[3]+boundaries[2]) / 2.
new_scale = (boundaries[3] - boundaries[2]) / 2.
kin_targs = np.array([-0.40625, 0. , 0.40625, 0.8125 ])
lfp_targ_y = (new_scale*kin_targs) + new_zero
for i in range(nblocks):
temp = lfp_targ_y.copy()
np.random.shuffle(temp)
if i==0:
z = temp.copy()
else:
z = np.hstack((z, temp))
#Fixed X axis:
x = np.tile(xaxis,(nblocks*4))
y = np.zeros(nblocks*4)
pairs = np.vstack([x, y, z]).T
return pairs
class LFP_Mod_plus_MC_hold(LFP_Mod):
mc_cursor_radius = traits.Float(.5, desc="Radius of cursor")
mc_target_radius = traits.Float(3, desc="Radius of MC target")
mc_cursor_color = (.5,0,.5,1)
    mc_plant_type_options = list(plantlist.keys())
    mc_plant_type = traits.OptionsList(*plantlist, bmi3d_input_options=list(plantlist.keys()))
origin_hold_time = traits.Float(.2, desc="Hold time in center")
exclude_parent_traits = ['goal_cache_block'] #redefine this to NOT include marker_num, marker_count
marker_num = traits.Int(14,desc='Index')
marker_count = traits.Int(16,desc='Num of markers')
joystick_method = traits.Float(1,desc="1: Normal velocity, 0: Position control")
joystick_speed = traits.Float(20, desc="Radius of cursor")
move_while_in_center = traits.Float(1, desc="1 = update plant while in lfp_target, lfp_hold, 0 = don't update in these states")
scale_factor = 3.0 #scale factor for converting hand movement to screen movement (1cm hand movement = 3.5cm cursor movement)
wait_flag = 1
# NOTE!!! The marker on the hand was changed from #0 to #14 on
# 5/19/13 after LED #0 broke. All data files saved before this date
# have LED #0 controlling the cursor.
limit2d = 1
status = dict(
wait = dict(start_trial="origin", stop=None),
origin = dict(enter_origin="origin_hold", stop=None),
origin_hold = dict(origin_hold_complete="lfp_target",leave_origin="hold_penalty", stop=None),
lfp_target = dict(enter_lfp_target="lfp_hold", leave_origin="hold_penalty", powercap_penalty="powercap_penalty", stop=None),
lfp_hold = dict(leave_early="lfp_target", lfp_hold_complete="reward", leave_origin="hold_penalty", powercap_penalty="powercap_penalty",stop=None),
powercap_penalty = dict(powercap_penalty_end="origin"),
hold_penalty = dict(hold_penalty_end="origin",stop=None),
reward = dict(reward_end="wait")
)
static_states = ['origin'] # states in which the decoder is not run
trial_end_states = ['reward']
lfp_cursor_on = ['lfp_target', 'lfp_hold', 'reward']
sequence_generators = ['lfp_mod_4targ_plus_mc_orig']
def __init__(self, *args, **kwargs):
super(LFP_Mod_plus_MC_hold, self).__init__(*args, **kwargs)
if self.move_while_in_center>0:
self.no_plant_update_states = []
else:
self.no_plant_update_states = ['lfp_target', 'lfp_hold']
mc_origin = VirtualCircularTarget(target_radius=self.mc_target_radius, target_color=RED)
lfp_target = VirtualSquareTarget(target_radius=self.lfp_target_rad, target_color=self.lfp_target_color)
self.targets = [lfp_target, mc_origin]
self.mc_plant = plantlist[self.mc_plant_type]
if hasattr(self.mc_plant, 'graphics_models'):
for model in self.mc_plant.graphics_models:
self.add_model(model)
# Declare any plant attributes which must be saved to the HDF file at the _cycle rate
for attr in self.mc_plant.hdf_attrs:
self.add_dtype(*attr)
self.target_location_mc = np.array([-100, -100, -100])
self.manual_control_type = None
self.current_pt=np.zeros([3]) #keep track of current pt
self.last_pt=np.zeros([3])
def init(self):
self.add_dtype('mc_targ', 'f8', (3,)) ###ADD BACK
super(LFP_Mod_plus_MC_hold, self).init()
def _cycle(self):
'''
Calls any update functions necessary and redraws screen. Runs 60x per second.
'''
self.task_data['mc_targ'] = self.target_location_mc.copy()
mc_plant_data = self.mc_plant.get_data_to_save()
for key in mc_plant_data:
self.task_data[key] = mc_plant_data[key]
super(LFP_Mod_plus_MC_hold, self)._cycle()
def _parse_next_trial(self):
t = self.next_trial
self.lfp_targ = t['lfp']
self.mc_targ_orig = t['origin']
def _start_origin(self):
if self.wait_flag:
self.origin_hold_time_store = self.origin_hold_time
self.origin_hold_time = 3
self.wait_flag = 0
else:
self.origin_hold_time = self.origin_hold_time_store
#only 1 target:
target = self.targets[1] #Origin
self.target_location_mc = self.mc_targ_orig #Origin
target.move_to_position(self.target_location_mc)
target.cue_trial_start()
#Turn off lfp things
self.lfp_plant.turn_off()
self.targets[0].hide()
def _start_lfp_target(self):
#only 1 target:
target = self.targets[0] #LFP target
self.target_location_lfp = self.lfp_targ #LFP target
target.move_to_position(self.target_location_lfp)
target.cue_trial_start()
self.lfp_plant.turn_on()
def _start_lfp_hold(self):
#make next target visible unless this is the final target in the trial
pass
def _start_hold_penalty(self):
#hide targets
for target in self.targets:
target.hide()
self.tries += 1
self.target_index = -1
#Turn off lfp things
self.lfp_plant.turn_off()
self.targets[0].hide()
def _end_origin(self):
self.targets[1].cue_trial_end_success()
def _test_enter_origin(self, ts):
cursor_pos = self.mc_plant.get_endpoint_pos()
d = np.linalg.norm(cursor_pos - self.target_location_mc)
return d <= (self.mc_target_radius - self.mc_cursor_radius)
# def _test_origin_timeout(self, ts):
# return ts>self.timeout_time
def _test_leave_origin(self, ts):
if self.manual_control_type == 'joystick':
if hasattr(self,'touch'):
if self.touch <0.5:
self.last_touch_zero_event = time.time()
return True
cursor_pos = self.mc_plant.get_endpoint_pos()
d = np.linalg.norm(cursor_pos - self.target_location_mc)
return d > (self.mc_target_radius - self.mc_cursor_radius)
def _test_origin_hold_complete(self,ts):
return ts>=self.origin_hold_time
# def _test_enter_lfp_target(self, ts):
# '''
# return true if the distance between center of cursor and target is smaller than the cursor radius
# '''
# cursor_pos = self.lfp_plant.get_endpoint_pos()
# cursor_pos = [cursor_pos[0], cursor_pos[2]]
# targ_loc = np.array([self.target_location_lfp[0], self.target_location_lfp[2]])
# d = np.linalg.norm(cursor_pos - targ_loc)
# return d <= (self.lfp_target_rad - self.lfp_cursor_rad)
# def _test_leave_early(self, ts):
# '''
# return true if cursor moves outside the exit radius
# '''
# cursor_pos = self.lfp_plant.get_endpoint_pos()
# d = np.linalg.norm(cursor_pos - self.target_location_lfp)
# rad = self.lfp_target_rad - self.lfp_cursor_rad
# return d > rad
def _test_hold_penalty_end(self, ts):
return ts>self.hold_penalty_time
def _end_lfp_hold(self):
# change current target color to green
self.targets[0].cue_trial_end_success()
def move_plant(self):
if self.state in self.lfp_cursor_on:
feature_data = self.get_features()
# Save the "neural features" (e.g. spike counts vector) to HDF file
for key, val in feature_data.items():
self.task_data[key] = val
Bu = None
assist_weight = 0
target_state = np.zeros([self.decoder.n_states, self.decoder.n_subbins])
## Run the decoder
neural_features = feature_data[self.extractor.feature_type]
self.call_decoder(neural_features, target_state, Bu=Bu, assist_level=assist_weight, feature_type=self.extractor.feature_type)
## Drive the plant to the decoded state, if permitted by the constraints of the plant
self.lfp_plant.drive(self.decoder)
self.task_data['decoder_state'] = decoder_state = self.decoder.get_state(shape=(-1,1))
#return decoder_state
#Sets the plant configuration based on motiontracker data. For manual control, uses
#motiontracker data. If no motiontracker data available, returns None'''
#get data from motion tracker- take average of all data points since last poll
if self.state in self.no_plant_update_states:
pt = np.array([0, 0, 0])
print('no update')
else:
if self.manual_control_type == 'motiondata':
pt = self.motiondata.get()
if len(pt) > 0:
pt = pt[:, self.marker_num, :]
conds = pt[:, 3]
inds = np.nonzero((conds>=0) & (conds!=4))[0]
if len(inds) > 0:
pt = pt[inds,:3]
#scale actual movement to desired amount of screen movement
pt = pt.mean(0) * self.scale_factor
#Set y coordinate to 0 for 2D tasks
if self.limit2d:
#pt[1] = 0
pt[2] = pt[1].copy()
pt[1] = 0
pt[1] = pt[1]*2
# Return cursor location
self.no_data_count = 0
pt = pt * mm_per_cm #self.convert_to_cm(pt)
else: #if no usable data
self.no_data_count += 1
pt = None
else: #if no new data
self.no_data_count +=1
pt = None
elif self.manual_control_type == 'joystick':
pt = self.joystick.get()
#if touch sensor on:
try:
self.touch = pt[-1][0][2]
except:
pass
if len(pt) > 0:
pt = pt[-1][0]
pt[0]=1-pt[0]; #Switch L / R axes
calib = [0.497,0.517] #Sometimes zero point is subject to drift this is the value of the incoming joystick when at 'rest'
if self.joystick_method==0:
#pt = pt[-1][0]
#pt[0]=1-pt[0]; #Switch L / R axes
#calib = [0.497,0.517] #Sometimes zero point is subject to drift this is the value of the incoming joystick when at 'rest'
# calib = [ 0.487, 0. ]
pos = np.array([(pt[0]-calib[0]), 0, calib[1]-pt[1]])
pos[0] = pos[0]*36
pos[2] = pos[2]*24
self.current_pt = pos
elif self.joystick_method==1:
vel=np.array([(pt[0]-calib[0]), 0, calib[1]-pt[1]])
epsilon = 2*(10**-2) #Define epsilon to stabilize cursor movement
if sum((vel)**2) > epsilon:
self.current_pt=self.last_pt+self.joystick_speed*vel*(1/60) #60 Hz update rate, dt = 1/60
else:
self.current_pt = self.last_pt
if self.current_pt[0] < -25: self.current_pt[0] = -25
if self.current_pt[0] > 25: self.current_pt[0] = 25
if self.current_pt[-1] < -14: self.current_pt[-1] = -14
if self.current_pt[-1] > 14: self.current_pt[-1] = 14
pt = self.current_pt
#self.plant.set_endpoint_pos(self.current_pt)
self.last_pt = self.current_pt.copy()
elif self.manual_control_type == None:
pt = None
try:
pt0 = self.motiondata.get()
self.manual_control_type='motiondata'
except:
print('not motiondata')
try:
pt0 = self.joystick.get()
self.manual_control_type = 'joystick'
except:
print('not joystick data')
# Set the plant's endpoint to the position determined by the motiontracker, unless there is no data available
if self.manual_control_type is not None:
if pt is not None and len(pt)>0:
self.mc_plant.set_endpoint_pos(pt)
@staticmethod
def lfp_mod_4targ_plus_mc_orig(nblocks=100, boundaries=(-18,18,-12,12), xaxis=-8):
'''
See lfp_mod_4targ for lfp target explanation
'''
new_zero = (boundaries[3]+boundaries[2]) / 2.
new_scale = (boundaries[3] - boundaries[2]) / 2.
kin_targs = np.array([-0.40625, 0. , 0.40625, 0.8125 ])
lfp_targ_y = (new_scale*kin_targs) + new_zero
for i in range(nblocks):
temp = lfp_targ_y.copy()
np.random.shuffle(temp)
if i==0:
z = temp.copy()
else:
z = np.hstack((z, temp))
#Fixed X axis:
x = np.tile(xaxis,(nblocks*4))
y = np.zeros(nblocks*4)
lfp = np.vstack([x, y, z]).T
origin = np.zeros(( lfp.shape ))
it = iter([dict(lfp=lfp[i,:], origin=origin[i,:]) for i in range(lfp.shape[0])])
return it
class LFP_Mod_plus_MC_reach(LFP_Mod_plus_MC_hold):
mc_cursor_radius = traits.Float(.5, desc="Radius of cursor")
mc_target_radius = traits.Float(3, desc="Radius of MC target")
mc_cursor_color = (.5,0,.5,1)
    mc_plant_type_options = list(plantlist.keys())
    mc_plant_type = traits.OptionsList(*plantlist, bmi3d_input_options=list(plantlist.keys()))
origin_hold_time = traits.Float(.2, desc="Hold time in center")
mc_periph_holdtime = traits.Float(.2, desc="Hold time in center")
mc_timeout_time = traits.Float(10, desc="Time allowed to go between targets")
exclude_parent_traits = ['goal_cache_block'] #redefine this to NOT include marker_num, marker_count
marker_num = traits.Int(14,desc='Index')
marker_count = traits.Int(16,desc='Num of markers')
scale_factor = 3.0 #scale factor for converting hand movement to screen movement (1cm hand movement = 3.5cm cursor movement)
wait_flag = 1
# NOTE!!! The marker on the hand was changed from #0 to #14 on
# 5/19/13 after LED #0 broke. All data files saved before this date
# have LED #0 controlling the cursor.
limit2d = 1
# state_file = open("/home/helene/preeya/tot_pw.txt","w")
state_cnt = 0
status = dict(
wait = dict(start_trial="origin", stop=None),
origin = dict(enter_origin="origin_hold", stop=None),
origin_hold = dict(origin_hold_complete="lfp_target",leave_origin="hold_penalty", stop=None),
lfp_target = dict(enter_lfp_target="lfp_hold", leave_origin="hold_penalty", powercap_penalty="powercap_penalty", stop=None),
lfp_hold = dict(leave_early="lfp_target", lfp_hold_complete="mc_target", leave_origin="hold_penalty",powercap_penalty="powercap_penalty"),
mc_target = dict(enter_mc_target='mc_hold',mc_timeout="timeout_penalty", stop=None),
mc_hold = dict(leave_periph_early='hold_penalty',mc_hold_complete="reward"),
powercap_penalty = dict(powercap_penalty_end="origin"),
timeout_penalty = dict(timeout_penalty_end="wait"),
hold_penalty = dict(hold_penalty_end="origin"),
reward = dict(reward_end="wait"),
)
static_states = ['origin'] # states in which the decoder is not run
trial_end_states = ['reward', 'timeout_penalty']
lfp_cursor_on = ['lfp_target', 'lfp_hold', 'reward']
sequence_generators = ['lfp_mod_plus_MC_reach', 'lfp_mod_plus_MC_reach_INV']
def __init__(self, *args, **kwargs):
# import pickle
# decoder = pickle.load(open('/storage/decoders/cart20141216_03_cart_new2015_2.pkl'))
# self.decoder = decoder
super(LFP_Mod_plus_MC_reach, self).__init__(*args, **kwargs)
mc_origin = VirtualCircularTarget(target_radius=self.mc_target_radius, target_color=RED)
mc_periph = VirtualCircularTarget(target_radius=self.mc_target_radius, target_color=RED)
lfp_target = VirtualSquareTarget(target_radius=self.lfp_target_rad, target_color=self.lfp_target_color)
self.targets = [lfp_target, mc_origin, mc_periph]
# #Should be unnecessary:
# for target in self.targets:
# for model in target.graphics_models:
# self.add_model(model)
# self.lfp_plant = plantlist[self.lfp_plant_type]
# if hasattr(self.lfp_plant, 'graphics_models'):
# for model in self.lfp_plant.graphics_models:
# self.add_model(model)
# self.mc_plant = plantlist[self.mc_plant_type]
# if hasattr(self.mc_plant, 'graphics_models'):
# for model in self.mc_plant.graphics_models:
# self.add_model(model)
def _parse_next_trial(self):
t = self.next_trial
self.lfp_targ = t['lfp']
self.mc_targ_orig = t['origin']
self.mc_targ_periph = t['periph']
def _start_mc_target(self):
#Turn off LFP things
self.lfp_plant.turn_off()
self.targets[0].hide()
self.targets[1].hide()
target = self.targets[2] #MC target
self.target_location_mc = self.mc_targ_periph
target.move_to_position(self.target_location_mc)
target.cue_trial_start()
def _test_enter_mc_target(self,ts):
cursor_pos = self.mc_plant.get_endpoint_pos()
d = np.linalg.norm(cursor_pos - self.target_location_mc)
return d <= (self.mc_target_radius - self.mc_cursor_radius)
def _test_mc_timeout(self, ts):
return ts>self.mc_timeout_time
def _test_leave_periph_early(self, ts):
cursor_pos = self.mc_plant.get_endpoint_pos()
d = np.linalg.norm(cursor_pos - self.target_location_mc)
rad = self.mc_target_radius - self.mc_cursor_radius
return d > rad
def _test_mc_hold_complete(self, ts):
return ts>self.mc_periph_holdtime
    def _test_timeout_penalty_end(self, ts):
print('timeout', ts)
#return ts > 1.
return True
def _end_mc_hold(self):
self.targets[2].cue_trial_end_success()
# def _cycle(self):
# if self.state_cnt < 3600*3:
# self.state_cnt +=1
# s = "%s\n" % self.state
# self.state_file.write(str(s))
# if self.state_cnt == 3600*3:
# self.state_file.close()
# super(LFP_Mod_plus_MC_reach, self)._cycle()
def _start_reward(self):
super(LFP_Mod_plus_MC_reach, self)._start_reward()
lfp_targ = self.targets[0]
mc_orig = self.targets[1]
lfp_targ.hide()
mc_orig.hide()
@staticmethod
def lfp_mod_plus_MC_reach(nblocks=100, boundaries=(-18,18,-12,12), xaxis=-8, target_distance=6, n_mc_targets=4, mc_target_angle_offset=0,**kwargs):
new_zero = (boundaries[3]+boundaries[2]) / 2.
new_scale = (boundaries[3] - boundaries[2]) / 2.
kin_targs = np.array([-0.40625, 0. , 0.40625, 0.8125 ])
lfp_targ_y = (new_scale*kin_targs) + new_zero
for i in range(nblocks):
temp = lfp_targ_y.copy()
np.random.shuffle(temp)
if i==0:
z = temp.copy()
else:
z = np.hstack((z, temp))
#Fixed X axis:
x = np.tile(xaxis,(nblocks*4))
y = np.zeros(nblocks*4)
lfp = np.vstack([x, y, z]).T
origin = np.zeros(( lfp.shape ))
theta = []
for i in range(nblocks*4):
temp = np.arange(0, 2*np.pi, 2*np.pi/float(n_mc_targets))
np.random.shuffle(temp)
theta = theta + [temp]
theta = np.hstack(theta)
theta = theta + (mc_target_angle_offset*(np.pi/180.))
x = target_distance*np.cos(theta)
y = np.zeros(len(theta))
z = target_distance*np.sin(theta)
periph = np.vstack([x, y, z]).T
it = iter([dict(lfp=lfp[i,:], origin=origin[i,:], periph=periph[i,:]) for i in range(lfp.shape[0])])
if ('return_arrays' in kwargs.keys()) and kwargs['return_arrays']==True:
return lfp, origin, periph
else:
return it
@staticmethod
def lfp_mod_plus_MC_reach_INV(nblocks=100, boundaries=(-18,18,-12,12), xaxis=-8, target_distance=6, n_mc_targets=4, mc_target_angle_offset=0):
kw = dict(return_arrays=True)
lfp, origin, periph = LFP_Mod_plus_MC_reach.lfp_mod_plus_MC_reach(nblocks=nblocks, boundaries=boundaries, xaxis=xaxis, target_distance=target_distance,
n_mc_targets=n_mc_targets, mc_target_angle_offset=mc_target_angle_offset,**kw)
#Invert LFP:
lfp[:,2] = -1.0*lfp[:,2]
it = iter([dict(lfp=lfp[i,:], origin=origin[i,:], periph=periph[i,:]) for i in range(lfp.shape[0])])
return it
class DummyPlant(object):
def __init__(self,*args,**kwargs):
self.desc = 'dummy_plant object'
def get_intrinsic_coordinates(self):
return None
| 39.035247
| 161
| 0.599453
|
4a19494c9c70cf552815402d2169960d24fd1849
| 2,898
|
py
|
Python
|
examples/slds.py
|
bogiebro/funsor
|
c15eaf7019e34c647630ed3da89001e620a972fa
|
[
"Apache-2.0"
] | null | null | null |
examples/slds.py
|
bogiebro/funsor
|
c15eaf7019e34c647630ed3da89001e620a972fa
|
[
"Apache-2.0"
] | null | null | null |
examples/slds.py
|
bogiebro/funsor
|
c15eaf7019e34c647630ed3da89001e620a972fa
|
[
"Apache-2.0"
] | null | null | null |
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import argparse
import torch
import funsor
import funsor.torch.distributions as dist
import funsor.ops as ops
def main(args):
# Declare parameters.
trans_probs = funsor.Tensor(torch.tensor([[0.9, 0.1],
[0.1, 0.9]], requires_grad=True))
trans_noise = funsor.Tensor(torch.tensor([
0.1, # low noise component
1.0, # high noisy component
], requires_grad=True))
emit_noise = funsor.Tensor(torch.tensor(0.5, requires_grad=True))
params = [trans_probs.data,
trans_noise.data,
emit_noise.data]
# A Gaussian HMM model.
@funsor.interpreter.interpretation(funsor.terms.moment_matching)
def model(data):
log_prob = funsor.Number(0.)
# s is the discrete latent state,
# x is the continuous latent state,
# y is the observed state.
s_curr = funsor.Tensor(torch.tensor(0), dtype=2)
x_curr = funsor.Tensor(torch.tensor(0.))
for t, y in enumerate(data):
s_prev = s_curr
x_prev = x_curr
# A delayed sample statement.
s_curr = funsor.Variable('s_{}'.format(t), funsor.bint(2))
log_prob += dist.Categorical(trans_probs[s_prev], value=s_curr)
# A delayed sample statement.
x_curr = funsor.Variable('x_{}'.format(t), funsor.reals())
log_prob += dist.Normal(x_prev, trans_noise[s_curr], value=x_curr)
# Marginalize out previous delayed sample statements.
if t > 0:
log_prob = log_prob.reduce(ops.logaddexp, {s_prev.name, x_prev.name})
# An observe statement.
log_prob += dist.Normal(x_curr, emit_noise, value=y)
log_prob = log_prob.reduce(ops.logaddexp)
return log_prob
# Train model parameters.
torch.manual_seed(0)
data = torch.randn(args.time_steps)
optim = torch.optim.Adam(params, lr=args.learning_rate)
for step in range(args.train_steps):
optim.zero_grad()
log_prob = model(data)
assert not log_prob.inputs, 'free variables remain'
loss = -log_prob.data
loss.backward()
optim.step()
if args.verbose and step % 10 == 0:
print('step {} loss = {}'.format(step, loss.item()))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Switching linear dynamical system")
parser.add_argument("-t", "--time-steps", default=10, type=int)
parser.add_argument("-n", "--train-steps", default=101, type=int)
parser.add_argument("-lr", "--learning-rate", default=0.01, type=float)
parser.add_argument("--filter", action='store_true')
parser.add_argument("-v", "--verbose", action="store_true")
args = parser.parse_args()
main(args)
| 35.341463
| 85
| 0.622498
|
4a194a01fe52f861512ad6d10538ce1c97ba3d9d
| 424
|
py
|
Python
|
apps/service/migrations/0008_auto_20210904_1623.py
|
Xoma163/Petrovich
|
026e246f6b7d492d9be2dea205e351ac83acd89e
|
[
"MIT"
] | 4
|
2020-12-25T16:17:53.000Z
|
2022-01-19T15:06:19.000Z
|
apps/service/migrations/0008_auto_20210904_1623.py
|
Xoma163/Petrovich
|
026e246f6b7d492d9be2dea205e351ac83acd89e
|
[
"MIT"
] | 294
|
2020-07-17T15:45:21.000Z
|
2022-03-27T10:24:01.000Z
|
apps/service/migrations/0008_auto_20210904_1623.py
|
Xoma163/Petrovich
|
026e246f6b7d492d9be2dea205e351ac83acd89e
|
[
"MIT"
] | 3
|
2020-12-30T17:14:24.000Z
|
2021-12-19T09:14:22.000Z
|
# Generated by Django 3.1.12 on 2021-09-04 12:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('service', '0007_auto_20210904_1621'),
]
operations = [
migrations.AlterField(
model_name='notify',
name='crontab',
field=models.CharField(max_length=100, null=True, verbose_name='Crontab'),
),
]
| 22.315789
| 86
| 0.615566
|
4a194a5ca9aa84d6f5f8383a14ea92b60dfa4f56
| 1,133
|
py
|
Python
|
tests/integration/test_logger.py
|
deepio-oc/agent-Python-RobotFramework
|
da468a307910a0d2e3d0f6c11317f7cc2577e1d2
|
[
"Apache-2.0"
] | 42
|
2016-09-15T10:08:57.000Z
|
2022-03-23T10:00:46.000Z
|
tests/integration/test_logger.py
|
MAIF/agent-Python-RobotFramework
|
37db7c5cf06928683171465adb0d28fa8bfec18b
|
[
"Apache-2.0"
] | 100
|
2016-12-05T10:12:30.000Z
|
2022-03-15T16:25:15.000Z
|
tests/integration/test_logger.py
|
MAIF/agent-Python-RobotFramework
|
37db7c5cf06928683171465adb0d28fa8bfec18b
|
[
"Apache-2.0"
] | 42
|
2016-12-05T11:08:02.000Z
|
2021-12-07T09:36:13.000Z
|
"""
Copyright (c) 2021 http://reportportal.io .
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License
"""
from tests.helpers import utils
from six.moves import mock
from tests import REPORT_PORTAL_SERVICE
@mock.patch(REPORT_PORTAL_SERVICE)
def test_launch_log(mock_client_init):
result = utils.run_robot_tests(['examples/launch_log.robot'])
assert result == 0 # the test successfully passed
mock_client = mock_client_init.return_value
calls = utils.get_launch_log_calls(mock_client)
assert len(calls) == 3
messages = set(map(lambda x: x[1]['message'], calls))
assert messages == {'Hello, world!', 'Goodbye, world!', 'Enjoy my pug!'}
| 34.333333
| 76
| 0.759047
|
4a194aaaa85b49132e0cbc07c9714b958e2cba2d
| 2,155
|
py
|
Python
|
accounts/forms.py
|
MrMohammadY/bmi
|
cabfdfb9d580f6e4cdea9b0629d4dfcf7090ba22
|
[
"MIT"
] | 2
|
2021-12-16T10:56:45.000Z
|
2022-01-27T17:21:47.000Z
|
accounts/forms.py
|
MrMohammadY/bmi
|
cabfdfb9d580f6e4cdea9b0629d4dfcf7090ba22
|
[
"MIT"
] | null | null | null |
accounts/forms.py
|
MrMohammadY/bmi
|
cabfdfb9d580f6e4cdea9b0629d4dfcf7090ba22
|
[
"MIT"
] | null | null | null |
from django import forms
from django.contrib.auth import get_user_model, authenticate
from django.contrib.auth.password_validation import validate_password
from django.core.exceptions import ValidationError
from django.db.models import Q
User = get_user_model()
class RegistrationForm(forms.ModelForm):
password = forms.CharField(
widget=forms.PasswordInput(attrs={'class': 'form-control'})
)
confirm_password = forms.CharField(
widget=forms.PasswordInput(attrs={'class': 'form-control'})
)
class Meta:
model = User
fields = ('username', 'email', 'password', 'confirm_password')
widgets = {
'username': forms.TextInput(attrs={'class': 'form-control'}),
'email': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'google@gmail.com'}),
}
help_texts = {
'username': None,
}
def clean_confirm_password(self):
validate_password(self.cleaned_data['password'])
if self.cleaned_data['password'] != self.cleaned_data['confirm_password']:
raise ValidationError('passwords not equal!')
return self.cleaned_data['confirm_password']
class LoginForm(forms.Form):
username = forms.CharField(
min_length=3,
widget=forms.TextInput(
attrs={
'placeholder': 'Enter Username or Email',
'class': 'form-control'}
)
)
password = forms.CharField(
max_length=30,
min_length=4,
widget=forms.PasswordInput(
attrs={
'placeholder': 'Password',
'class': 'form-control'}
)
)
def clean(self):
cleaned_data = super().clean()
username = cleaned_data['username']
password = cleaned_data['password']
user = User.objects.filter(Q(username=username) | Q(email=username)).first()
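        # NOTE (assumption): authenticate() below receives the raw input; logging in with an email only returns a user if the active auth backend accepts an email as the username credential.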
if user and user.check_password(password):
cleaned_data['user'] = authenticate(username=username, password=password)
return cleaned_data
raise ValidationError('username or password invalid!')
| 30.785714
| 105
| 0.62413
|
4a194dc983c3cddc3e3e3bb82e51b0c6c48fbf9f
| 30,489
|
py
|
Python
|
dual_task/emr_run.py
|
thaihungle/DMNC
|
3ce17a9277bbeeb8125b588e86cc8aace67a0924
|
[
"MIT"
] | 11
|
2018-04-26T10:56:03.000Z
|
2021-03-20T08:46:25.000Z
|
dual_task/emr_run.py
|
thaihungle/DMNC
|
3ce17a9277bbeeb8125b588e86cc8aace67a0924
|
[
"MIT"
] | 1
|
2018-07-21T19:29:57.000Z
|
2018-08-03T06:14:52.000Z
|
dual_task/emr_run.py
|
thaihungle/DMNC
|
3ce17a9277bbeeb8125b588e86cc8aace67a0924
|
[
"MIT"
] | 5
|
2019-02-20T23:51:38.000Z
|
2019-12-05T05:41:55.000Z
|
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
import numpy as np
import pickle
import getopt
import time
import sys
import os
import nltk
from sklearn.metrics import roc_auc_score, f1_score
sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../')
import beam_search
from dual_dnc import Dual_DNC
from cached_dnc.cached_dual_dnc import CachedDual_DNC
from dnc import DNC
from cached_dnc.cached_controller import CachedLSTMController
from recurrent_controller import StatelessRecurrentController
#input_dim assume seq_len x out_dim
def convert_oh2raw(vec_data, decoder_point):
data=np.argmax(vec_data, axis=-1)
inp=[]
for ci,c in enumerate(data):
if ci<decoder_point:
inp.append(c)
return inp
def roc_auc(target_batch, prob_batch):
all_auc_macro=[]
all_auc_micro = []
for b in range(target_batch.shape[0]):
target = np.zeros(prob_batch.shape[-1])
for t in target_batch[b]:
if t>1:
target[t]=1
all_auc_macro.append(roc_auc_score(target, prob_batch[b], average='macro'))
all_auc_micro.append(roc_auc_score(target, prob_batch[b], average='micro'))
return np.mean(all_auc_macro),np.mean(all_auc_micro)
def fscore(target_batch, predict_batch, nlabel):
all_auc_macro=[]
all_auc_micro = []
for b in range(target_batch.shape[0]):
target = np.zeros(nlabel)
predict = np.zeros(nlabel)
for t in target_batch[b]:
if t>1:
target[t]=1
for t in predict_batch[b]:
if t>1:
predict[t]=1
all_auc_macro.append(f1_score(target, predict, average='macro'))
all_auc_micro.append(f1_score(target, predict, average='micro'))
return np.mean(all_auc_macro),np.mean(all_auc_micro)
def set_score_pre(target_batch, predict_batch):
s = []
s2 = []
for b in range(target_batch.shape[0]):
trim_target = []
trim_predict = []
for t in target_batch[b]:
if t > 1:
trim_target.append(t)
for t in predict_batch[b]:
if t > 1:
trim_predict.append(t)
if np.random.rand()>1:
print('{} vs {}'.format(trim_target, trim_predict))
acc = len(set(trim_target).intersection(set(trim_predict)))/len(set(trim_target))
acc2=0
if len(set(trim_predict))>0:
acc2 = len(set(trim_target).intersection(set(trim_predict))) / len(trim_predict)
s.append(acc)
s2.append(acc2)
return np.mean(s2), np.mean(s)#prec, recall
def set_score_pre_jac(target_batch, predict_batch):
s = []
s2 = []
s3 = []
for b in range(target_batch.shape[0]):
trim_target = []
trim_predict = []
for t in target_batch[b]:
if t > 1:
trim_target.append(t)
for t in predict_batch[b]:
if t > 1:
trim_predict.append(t)
if np.random.rand()>0.95:
print('{} vs {}'.format(trim_target, trim_predict))
acc = len(set(trim_target).intersection(set(trim_predict)))/len(set(trim_target))
acc2=0
if len(set(trim_predict))>0:
acc2 = len(set(trim_target).intersection(set(trim_predict))) / len(trim_predict)
acc3=len(set(trim_target).intersection(set(trim_predict)))/len(set(trim_target).union(set(trim_predict)))
s.append(acc)
s2.append(acc2)
s3.append(acc3)
return np.mean(s2), np.mean(s), np.mean(s3)#prec, recall, jaccard
def llprint(message):
sys.stdout.write(message)
sys.stdout.flush()
def load(path):
return pickle.load(open(path, 'rb'))
def onehot(index, size):
# print('-----')
# print(index)
vec = np.zeros(size, dtype=np.float32)
vec[int(index)] = 1.0
return vec
def prepare_mimic_sample_dual(dig_list, proc_list, word_space_size_input1,
word_space_size_input2, word_space_size_output, index=-1, is_raw=False, multi=False):
if index<0:
index = int(np.random.choice(len(dig_list),1))
input_seq = dig_list[index]
# print(input_seq)
i1=[]
i2=[]
isi1=True
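    # the flattened input packs two code sequences separated by a single 0 token; split them back into i1 and i2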
for c in input_seq:
if c==0:
isi1=False
else:
if isi1:
i1.append(c)
else:
i2.append(c)
o = proc_list[index]
# print(i1)
# print(i2)
# print(o)
    if i2 == []:
i2=[0]
# raise False
maxl=max(len(i1),len(i2))
seq_len = maxl+1+len(o)
decoder_point = maxl + 1
input_vec1 = np.zeros(seq_len,dtype=np.int32)
input_vec2 = np.zeros(seq_len,dtype=np.int32)
output_vec = np.zeros(seq_len,dtype=np.int32)
for iii, token in enumerate(i1):
input_vec1[maxl - len(i1) + iii] = token
input_vec1[maxl] = 1
for iii, token in enumerate(i2):
input_vec2[maxl - len(i2) + iii] = token
input_vec2[maxl] = 1
for iii, token in enumerate(o):
output_vec[decoder_point + iii] = token
if is_raw:
return input_vec1, input_vec2, output_vec,seq_len, decoder_point, maxl-len(i1), maxl-len(i2), o
input_vec1 = np.array([onehot(code, word_space_size_input1) for code in input_vec1])
input_vec2 = np.array([onehot(code, word_space_size_input2) for code in input_vec2])
output_vec = np.array([onehot(code, word_space_size_output) for code in output_vec])
if multi:
# print(output_vec.shape)
output_vec2 = np.zeros((decoder_point+1,word_space_size_output))
for c in range(decoder_point):
output_vec2[c,:]=output_vec[c,:]
for c in range(decoder_point, seq_len):
output_vec2[decoder_point,:]+=output_vec[c,:]
output_vec=output_vec2
# print(output_vec.shape)
seq_len=decoder_point+1
input_vec1 = input_vec1[:seq_len,:]
input_vec2 = input_vec2[:seq_len, :]
return np.reshape(input_vec1, (1, -1, word_space_size_input1)), \
np.reshape(input_vec2, (1, -1, word_space_size_input2)), \
np.reshape(output_vec, (1, -1, word_space_size_output)),\
seq_len, decoder_point, maxl-len(i1), maxl-len(i2), o
def prepare_mimic_sample_dual_persist(patient_list, word_space_size_input1, word_space_size_input2,
word_space_size_output, index=-1, multi=False):
if index<0:
index = int(np.random.choice(len(patient_list),1))
# print('\n{}'.format(index))
patient=patient_list[index]
adms=[]
for adm in patient:
if len(adm)>2:
input_seq = adm[0]
input_seq2 = adm[1]
output_seq = adm[2]
else:
input_seq = adm[0]
input_seq2 = adm[0][::-1]
output_seq = adm[1]
adms.append(prepare_mimic_sample_dual([input_seq+[0]+input_seq2], [output_seq], word_space_size_input1, word_space_size_input2,
word_space_size_output, 0,multi=multi))
return adms
def mimic_task_persit_real_dual(args):
dirname = os.path.dirname(os.path.abspath(__file__))+'/data/save/'
print(dirname)
ckpts_dir = os.path.join(dirname , 'checkpoints_{}_dual_in_single_out_persit'.format(args.task))
llprint("Loading Data ... ")
patient_records = pickle.load(open('./data/real_data/mimic/{}/patient_records_small.pkl'.format(args.task), 'rb'))
str2tok_diag, str2tok_drug, str2tok_proc\
= pickle.load(open('./data/real_data/mimic/{}/list_dict_str2token_no_adm.pkl'.format(args.task), 'rb'))
tok2str_diag, tok2str_proc, tok2str_drug\
= pickle.load(open('./data/real_data/mimic/{}/list_dict_token2str_no_adm.pkl'.format(args.task), 'rb'))
# tok2str_diag2 = pickle.load(open('./data/real_data/mimic/{}/dig_token2str_no_adm.pkl'.format(args.task), 'rb'))
# tok2str_drug2 = pickle.load(open('./data/real_data/mimic/{}/drug_token2str_no_adm.pkl'.format(args.task), 'rb'))
# tok2str_proc2 = pickle.load(open('./data/real_data/mimic/{}/pro_token2str_no_adm.pkl'.format(args.task), 'rb'))
llprint("Done!\n")
all_index = list(range(len(patient_records)))
train_index = all_index[:int(len(patient_records) * 2 / 3)]
valid_index = all_index[int(len(patient_records) * 2 / 3):int(len(patient_records) * 5 / 6)]
test_index = all_index[int(len(patient_records) * 5/6):int(len(patient_records) * 1)]
patient_list_train = [patient_records[i] for i in train_index]
patient_list_valid = [patient_records[i] for i in valid_index]
patient_list_test = [patient_records[i] for i in test_index]
print('num_patient {}'.format(len(patient_records)))
print('num train {}'.format(len(patient_list_train)))
print('num valid {}'.format(len(patient_list_valid)))
print('num test {}'.format(len(patient_list_test)))
print('dim in {} {}'.format(len(str2tok_diag), len(str2tok_drug)))
print('dim out {}'.format(len(str2tok_proc)))
batch_size = 1
input_size1 = len(str2tok_diag)
input_size2 = len(str2tok_drug)
output_size = len(str2tok_proc)
sequence_max_length = 100
words_count = args.mem_size
word_size = args.word_size
read_heads = args.read_heads
learning_rate = 1e-4
momentum = 0.9
from_checkpoint = None
iterations = args.iters
start_step = 0
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as session:
llprint("Building Computational Graph ... ")
if args.type=='no_cache':
ncomputer = Dual_DNC(
StatelessRecurrentController,
input_size1,
input_size2,
output_size,
words_count,
word_size,
read_heads,
batch_size,
use_mem=args.use_mem,
hidden_controller_dim=args.hidden_dim,
decoder_mode=False,
write_protect=True,
dual_emb=args.dual_emb,
emb_size=args.emb_size,
share_mem=args.share_mem,
use_teacher=args.use_teacher,
persist_mode=args.persist,
attend_dim=args.attend
)
else:
ncomputer = CachedDual_DNC(
CachedLSTMController,
input_size1,
input_size2,
output_size,
words_count,
word_size,
read_heads,
batch_size,
hidden_controller_dim=args.hidden_dim,
use_mem=args.use_mem,
decoder_mode=False,
write_protect=True,
dual_emb=args.dual_emb,
emb_size=args.emb_size,
share_mem=args.share_mem,
use_teacher=args.use_teacher,
persist_mode=args.persist,
)
multi=True
if multi:
output, prob, loss, apply_gradients = ncomputer.build_loss_function_multi_label()
else:
output, prob, loss, apply_gradients = ncomputer.build_loss_function()
llprint("Done!\n")
llprint("Initializing Variables ... ")
session.run(tf.global_variables_initializer())
llprint("Done!\n")
            if args.from_checkpoint != '':
if args.from_checkpoint=='default':
from_checkpoint = ncomputer.print_config()
else:
from_checkpoint = args.from_checkpoint
llprint("Restoring Checkpoint %s ... " % from_checkpoint)
ncomputer.restore(session, ckpts_dir, from_checkpoint)
llprint("Done!\n")
last_100_losses = []
start = 0 if start_step == 0 else start_step + 1
end = start_step + iterations + 1
if args.mode == 'test':
end = start
patient_list_valid = patient_list_test
start_time_100 = time.time()
avg_100_time = 0.
avg_counter = 0
if args.mode=='train':
log_dir = './data/summary/log_mimic_{}_dual_in_single_out_persit/'.format(args.task)
if not os.path.isdir(log_dir):
os.mkdir(log_dir)
log_dir='./data/summary/log_mimic_{}_dual_in_single_out_persit/{}/'.format(args.task, ncomputer.print_config())
if not os.path.isdir(log_dir):
os.mkdir(log_dir)
train_writer = tf.summary.FileWriter(log_dir, session.graph)
min_tloss=0
for i in range(start, end + 1):
try:
llprint("\rIteration %d/%d" % (i, end))
# ncomputer.clear_current_mem(session)
adms = \
prepare_mimic_sample_dual_persist(patient_list_train, input_size1, input_size2, output_size, multi=multi)
summerize = (i % args.eval_step == 0)
if args.mode == 'train':
ncomputer.clear_current_mem(session)
for adm in adms:
input_vec1, input_vec2, output_vec, seq_len, decoder_point, e1, e2, rout \
=adm
if len(rout) == 1 and rout[0] == 0:
continue
loss_value, _= session.run([
loss,
apply_gradients
], feed_dict={
ncomputer.input_data1: input_vec1,
ncomputer.input_data2: input_vec2,
ncomputer.target_output: output_vec,
ncomputer.sequence_length: seq_len,
ncomputer.encode1_point: e1,
ncomputer.encode2_point: e2,
ncomputer.decoder_point: decoder_point,
ncomputer.clear_mem: False,
ncomputer.teacher_force: ncomputer.get_bool_rand_incremental(seq_len)
})
last_100_losses.append(loss_value)
tpre=0
if summerize:
llprint("\n\t episode %d -->Avg. Cross-Entropy: %.7f\n" % (i, np.mean(last_100_losses)))
summary = tf.Summary()
summary.value.add(tag='batch_train_loss', simple_value=np.mean(last_100_losses))
trscores=[]
for ii in range(10):
ncomputer.clear_current_mem(session)
adms = \
prepare_mimic_sample_dual_persist(patient_list_train, input_size1, input_size2,
output_size,multi=multi)
# ncomputer.clear_current_mem(session)
for adm in adms:
input_vec1, input_vec2, output_vec, seq_len, decoder_point, e1, e2, rout \
= adm
if len(rout)==1 and rout[0]==0:
continue
out = session.run([prob], feed_dict={
ncomputer.input_data1: input_vec1,
ncomputer.input_data2: input_vec2,
ncomputer.target_output: output_vec,
ncomputer.sequence_length: seq_len,
ncomputer.encode1_point: e1,
ncomputer.encode2_point: e2,
ncomputer.decoder_point: decoder_point,
ncomputer.clear_mem: False,
ncomputer.teacher_force: ncomputer.get_bool_rand_incremental(seq_len, prob_true_max=0)
})
out = np.reshape(np.asarray(out),[-1, seq_len, output_size])
out_list=[]
# print('{} vs {}'.format(seq_len,out.shape[1]))
if multi:
out=np.argsort(out, axis=-1)
for io in range(len(rout)):
out_list.append(out[0][decoder_point][-io-1])
else:
out = np.argmax(out, axis=-1)
for io in range(decoder_point, out.shape[1]):
if out[0][io]<=1:
break
out_list.append(out[0][io])
pre,rec, jac=set_score_pre_jac(np.asarray([rout]),np.asarray([out_list]))
trscores.append(jac)
tescores = []
tescores2_1 = []
tescores2_2 = []
tescores2_3 = []
tescores2_5 = []
tescores2 = []
tescores3 = []
tescores4 = []
tescores5 = []
tescores6 = []
print('-----')
big_out_list=[]
losses=[]
single_best_loss=1000
best_mem_view=None
best_data=None
for ii in range(len(patient_list_valid)):
ncomputer.clear_current_mem(session)
adms = \
prepare_mimic_sample_dual_persist(patient_list_valid, input_size1, input_size2,
output_size, ii,multi=multi)
# ncomputer.clear_current_mem(session)
for adm in adms:
input_vec1, input_vec2, output_vec, seq_len, decoder_point, e1, e2, rout \
= adm
if len(rout)==1 and rout[0]==0:
continue
out, loss_v, mem_view = session.run([prob, loss, ncomputer.packed_memory_view], feed_dict={
ncomputer.input_data1: input_vec1,
ncomputer.input_data2: input_vec2,
ncomputer.target_output: output_vec,
ncomputer.sequence_length: seq_len,
ncomputer.encode1_point: e1,
ncomputer.encode2_point: e2,
ncomputer.decoder_point: decoder_point,
ncomputer.clear_mem: False,
ncomputer.teacher_force: ncomputer.get_bool_rand_incremental(seq_len, prob_true_max=0)
})
# print(np.argmax(target_output, axis=-1))
# print(out)
# print(np.max(out, axis=-1))
# print(weights)
losses.append(loss_v)
out = np.reshape(np.asarray(out), [-1, seq_len, output_size])
pout=out
pind = np.argsort(out, axis=-1)
out_list = []
unorder_predict1=[]
if multi:
io=1
while len(out_list)<len(rout):
if pind[0][decoder_point][-io]>1:
out_list.append(pind[0][decoder_point][-io])
unorder_predict1.append(pind[0][decoder_point][-io])
io+=1
unorder_predict1=unorder_predict1[::-1]
else:
if args.use_beam_search==0:
for io in range(decoder_point, seq_len):
c = 1
while 1:
label = pind[0][io][-c]
if label not in out_list:
out_list.append(label)
break
c += 1
else:
out_list = beam_search.leap_beam_search(pout[0][decoder_point:seq_len],
beam_size=args.use_beam_search,
is_set=True, is_fix_length=True)
prob_pre=[]
for ci,c in enumerate(out_list):
prob_pre.append(pout[0][decoder_point+ci][c])
unorder_predict1 = [x for _, x in sorted(zip(prob_pre, out_list))]
if args.mode=='test':
avg_loss_v=loss_v/len(rout)
if avg_loss_v<single_best_loss:
single_best_loss=avg_loss_v
best_mem_view=mem_view
best_data=[input_vec1, input_vec2, unorder_predict1[::-1], rout, decoder_point]
big_out_list.append(out_list)
# tescores.append(bleu_score(np.asarray([rout]), np.asarray([out_list])))
pre, rec, jac = set_score_pre_jac(np.asarray([rout]), np.asarray([out_list]))
# print(pout.shape)
auc,auc2=roc_auc(np.asarray([rout]),pout[:,decoder_point])
f1, f2 = fscore(np.asarray([rout]), np.asarray([out_list]), output_size)
tescores.append(jac)
tescores2.append(pre)
tescores3.append(auc)
tescores4.append(auc2)
tescores5.append(f1)
tescores6.append(f2)
# at 1
# if args.mode=='test':
# pre1=0
# for pr in unorder_predict1[-2:]:
# pre, rec = set_score_pre(np.asarray([rout]), np.asarray([[pr]]))
# pre1=max(pre1,pre)
# if pre1==1:
# break
# pre=pre1
# else:
pre, rec = set_score_pre(np.asarray([rout]), np.asarray([unorder_predict1[-1:]]))
tescores2_1.append(pre)
# at 2
pre, rec = set_score_pre(np.asarray([rout]), np.asarray([unorder_predict1[-2:]]))
tescores2_2.append(pre)
# at 3
pre, rec = set_score_pre(np.asarray([rout]), np.asarray([unorder_predict1[-3:]]))
tescores2_3.append(pre)
# at 5
pre, rec = set_score_pre(np.asarray([rout]), np.asarray([unorder_predict1[-5:]]))
tescores2_5.append(pre)
tloss=np.mean(losses)
tpre=np.mean(tescores2)
print('tr score {} vs te store {}'.format(np.mean(trscores),np.mean(tescores)))
print('test prec {} auc {} auc2 {} f1 {} f2 {}'.
format(np.mean(tescores2),
np.mean(tescores3), np.mean(tescores4),
np.mean(tescores5), np.mean(tescores6)))
print('test at 1 {}'.format(np.mean(tescores2_1)))
print('test at 2 {}'.format(np.mean(tescores2_2)))
print('test at 3 {}'.format(np.mean(tescores2_3)))
print('test at 5 {}'.format(np.mean(tescores2_5)))
print('test loss {}'.format(tloss))
if args.mode=='test':
print(best_mem_view['write_gates1'])
print('---')
print(best_mem_view['write_gates2'])
print('xxxx')
in1=convert_oh2raw(best_data[0][0], best_data[-1]-1)
in2=convert_oh2raw(best_data[1][0], best_data[-1]-1)
print(in1)
print(in2)
print(best_data[2])
print(best_data[3])
print('--translate---')
in12 = []
in22 = []
out12 = []
out22 = []
# for c in in1:
# if c>1:
# in12.append(tok2str_diag2[int(tok2str_diag[c])])
# for c in in2:
# if c>1:
# in22.append(tok2str_proc2[int(tok2str_proc[c])])
# for c in best_data[2]:
# if c > 1:
# out12.append(tok2str_drug2[int(tok2str_drug[c])])
# for c in best_data[3]:
# if c > 1:
# out22.append(tok2str_drug2[int(tok2str_drug[c])])
# print(in12)
# print(in22)
# print(out12)
# print(sorted(out22))
if args.mode=='train':
summary.value.add(tag='train_jac', simple_value=np.mean(trscores))
summary.value.add(tag='test_jac', simple_value=np.mean(tescores))
summary.value.add(tag='test_loss', simple_value=tloss)
summary.value.add(tag='test_recall', simple_value=np.mean(tescores3))
summary.value.add(tag='test_precision', simple_value=np.mean(tescores2))
train_writer.add_summary(summary, i)
train_writer.flush()
end_time_100 = time.time()
elapsed_time = (end_time_100 - start_time_100) / 60
avg_counter += 1
avg_100_time += (1. / avg_counter) * (elapsed_time - avg_100_time)
estimated_time = (avg_100_time * ((end - i) / 100.)) / 60.
print ("\tAvg. 100 iterations time: %.2f minutes" % (avg_100_time))
print ("\tApprox. time to completion: %.2f hours" % (estimated_time))
start_time_100 = time.time()
last_100_losses = []
if min_tloss<tpre:
min_tloss=tpre
if args.mode == 'train':
llprint("\nSaving Checkpoint ... ")
ncomputer.save(session, ckpts_dir, ncomputer.print_config())
llprint("Done!\n")
except KeyboardInterrupt:
sys.exit(0)
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--mode', default="train")
parser.add_argument('--use_mem', default=False, type=str2bool)
parser.add_argument('--share_mem', default=True, type=str2bool)
parser.add_argument('--use_teacher', default=False, type=str2bool)
parser.add_argument('--persist', default=True, type=str2bool)
parser.add_argument('--word_size', default=64, type=int)
parser.add_argument('--mem_size', default=16, type=int)
parser.add_argument('--read_heads', default=1, type=int)
parser.add_argument('--emb_size', default=64, type=int)
parser.add_argument('--dual_emb', default=True, type=str2bool)
parser.add_argument('--attend', default=0, type=int)
parser.add_argument('--hidden_dim', default=64, type=int)
parser.add_argument('--iters', default=42000, type=int)
parser.add_argument('--eval_step', default=3000, type=int)
parser.add_argument('--type', default="no_cache")
parser.add_argument('--task', default="trim_diag_proc_drug_no_adm")
parser.add_argument('--from_checkpoint', default="")
parser.add_argument('--use_beam_search', default=0, type=int)
args = parser.parse_args()
print(args)
mimic_task_persit_real_dual(args)
| 44.574561
| 135
| 0.484076
|
4a194dec8f39b0df9605a7674a3878e246edd0a0
| 4,519
|
py
|
Python
|
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/create_function_version_request_body.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/create_function_version_request_body.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/create_function_version_request_body.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreateFunctionVersionRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'digest': 'str',
'version': 'str',
'description': 'str'
}
attribute_map = {
'digest': 'digest',
'version': 'version',
'description': 'description'
}
def __init__(self, digest=None, version=None, description=None):
"""CreateFunctionVersionRequestBody - a model defined in huaweicloud sdk"""
self._digest = None
self._version = None
self._description = None
self.discriminator = None
if digest is not None:
self.digest = digest
if version is not None:
self.version = version
if description is not None:
self.description = description
@property
def digest(self):
"""Gets the digest of this CreateFunctionVersionRequestBody.
        MD5 hash value
:return: The digest of this CreateFunctionVersionRequestBody.
:rtype: str
"""
return self._digest
@digest.setter
def digest(self, digest):
"""Sets the digest of this CreateFunctionVersionRequestBody.
        MD5 hash value
:param digest: The digest of this CreateFunctionVersionRequestBody.
:type: str
"""
self._digest = digest
@property
def version(self):
"""Gets the version of this CreateFunctionVersionRequestBody.
        Name of the published version
:return: The version of this CreateFunctionVersionRequestBody.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this CreateFunctionVersionRequestBody.
        Name of the published version
:param version: The version of this CreateFunctionVersionRequestBody.
:type: str
"""
self._version = version
@property
def description(self):
"""Gets the description of this CreateFunctionVersionRequestBody.
        Description of the published version
:return: The description of this CreateFunctionVersionRequestBody.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this CreateFunctionVersionRequestBody.
        Description of the published version
:param description: The description of this CreateFunctionVersionRequestBody.
:type: str
"""
self._description = description
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateFunctionVersionRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.739645
| 85
| 0.575791
|
4a194f90691328ac383b2597f95e95f2477e67ff
| 409
|
py
|
Python
|
python_random_is_all_about/test_platform/e2e/file_1.py
|
bflaven/BlogArticlesExamples
|
5df2dfc26170ffbbade78ba136bf3172391e3b2a
|
[
"MIT"
] | 5
|
2018-05-03T08:16:02.000Z
|
2021-09-04T03:44:24.000Z
|
python_random_is_all_about/test_platform/e2e/file_1.py
|
bflaven/BlogArticlesExamples
|
5df2dfc26170ffbbade78ba136bf3172391e3b2a
|
[
"MIT"
] | 1
|
2022-01-28T19:27:19.000Z
|
2022-01-28T19:27:19.000Z
|
python_random_is_all_about/test_platform/e2e/file_1.py
|
bflaven/BlogArticlesExamples
|
5df2dfc26170ffbbade78ba136bf3172391e3b2a
|
[
"MIT"
] | 2
|
2020-09-10T13:33:27.000Z
|
2022-02-09T11:07:38.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
"""
cd /Users/brunoflaven/Documents/02_copy/_random_is_all_about
python file_1.py
"""
import random
file='file_1.py'
# print('\n')
# print('You are in: ', file)
# print('\n')
def get_random_choice(input_array):
return random.choice(input_array)
print('\n')
print(file)
print(get_random_choice(["apple", "pear", "peach", "banana", "mango"]))
print('\n')
| 15.148148
| 71
| 0.662592
|
4a194f9b131f86dffb8c8a14af16cd8f80cf9523
| 2,787
|
py
|
Python
|
make_jyutping_corpus_freqs.py
|
gkovacs/fcitx-rime-config
|
b7845ff210de889e7b3418a48ce150d6edacd74d
|
[
"MIT"
] | 6
|
2018-08-25T07:49:06.000Z
|
2021-01-12T20:29:41.000Z
|
make_jyutping_corpus_freqs.py
|
gkovacs/fcitx-rime-config
|
b7845ff210de889e7b3418a48ce150d6edacd74d
|
[
"MIT"
] | null | null | null |
make_jyutping_corpus_freqs.py
|
gkovacs/fcitx-rime-config
|
b7845ff210de889e7b3418a48ce150d6edacd74d
|
[
"MIT"
] | 1
|
2021-01-24T03:44:51.000Z
|
2021-01-24T03:44:51.000Z
|
import sys
from collections import Counter
import json
from memoize import memoize
import pinyin
import jyutping
import pycantonese
corpus = pycantonese.hkcancor()
#from hanziconv import HanziConv
from opencc import OpenCC
s2hk = OpenCC('s2hk').convert
from mkdict import pinyin_to_zhuyin_real as pinyin_to_zhuyin
from mkdict import get_all_yue, get_merged_entries
def get_contents_in_dictionary(dictfile):
lines = open(dictfile).readlines()
output = []
is_started = False
for line in lines:
x = line.strip()
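    # entries begin after the line containing only '...', which closes the YAML header of a Rime .dict.yaml file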
if x == '...':
is_started = True
continue
if not is_started:
continue
output.append(line)
return output
def get_word_and_pinyin_in_dictionary(dictfile):
output = []
for line in get_contents_in_dictionary(dictfile):
if '\t' not in line:
continue
output.append(line.split('\t')[:2])
return output
def list_dictionaries():
dictionaries = '''
- terra_pinyin
- terra_pinyin.extra_hanzi
- luna_pinyin.sgmain
- luna_pinyin.sgplus
- luna_pinyin.sgplus2
- luna_pinyin.chat
- luna_pinyin.net
- luna_pinyin.user
- luna_pinyin.website
- luna_pinyin.poetry
- luna_pinyin.computer
- luna_pinyin.place
- luna_pinyin.shopping
'''
dictionaries = dictionaries.split('-')
dictionaries = [x.strip() for x in dictionaries]
dictionaries = [x for x in dictionaries if x != '']
return dictionaries
def get_word_list():
output = []
output_set = set()
dictionaries = list_dictionaries()
dictionaries.append('leimaau_jyutping')
for dictionary_name in dictionaries:
dictfile = dictionary_name + '.dict.yaml'
for item in get_word_and_pinyin_in_dictionary(dictfile):
word = item[0]
if word not in output_set:
output_set.add(word)
output.append(word)
for item in get_merged_entries():
word = item['trad']
if word not in output_set:
output_set.add(word)
output.append(word)
word = item['simp']
if word not in output_set:
output_set.add(word)
output.append(word)
return output
def get_word_to_jyutping_corpus_freq(word):
#trad = HanziConv.toTraditional(word)
trad = s2hk(word)
search_results = corpus.search(character=trad)
output = Counter()
for x in search_results:
char = x[0]
jyut = x[2]
if char != trad:
continue
output[jyut] += 1
return output
def main():
output = {}
seen = set()
word_list = get_word_list()
for idx,word in enumerate(word_list):
if word in seen:
continue
if idx % 100 == 0:
print(word, idx, '/', len(word_list))
seen.add(word)
corpus_freq = get_word_to_jyutping_corpus_freq(word)
if len(corpus_freq.keys()) > 0:
output[word] = corpus_freq
json.dump(output, open('jyutping_corpus_freq.json', 'wt'))
main()
| 25.108108
| 60
| 0.694295
|
4a1950519c01a40cfd18e649cd91e79234138faf
| 26,029
|
py
|
Python
|
resolutionwr.py
|
colinquirk/PsychopyResolutionWR
|
ef028887b3e3b85c1fa8872aa273b869b5f63593
|
[
"MIT"
] | 4
|
2020-03-21T00:27:33.000Z
|
2021-12-29T20:30:39.000Z
|
resolutionwr.py
|
colinquirk/PsychopyResolutionWR
|
ef028887b3e3b85c1fa8872aa273b869b5f63593
|
[
"MIT"
] | null | null | null |
resolutionwr.py
|
colinquirk/PsychopyResolutionWR
|
ef028887b3e3b85c1fa8872aa273b869b5f63593
|
[
"MIT"
] | 1
|
2021-12-07T19:26:09.000Z
|
2021-12-07T19:26:09.000Z
|
"""An estimation whole report experiment.
Author - Colin Quirk (cquirk@uchicago.edu)
Repo: https://github.com/colinquirk/PsychopyResolutionWR
This is a working memory paradigm adapted from Adam, Vogel, Awh (2017) with minor differences.
This code can either be used directly or it can be inherited and extended.
If this file is run directly the defaults at the top of the page will be
used. To make simple changes, you can adjust any of these files.
For more in depth changes you will need to overwrite the methods yourself.
Note: this code relies on my templateexperiments module. You can get
it from https://github.com/colinquirk/templateexperiments and either put it in the
same folder as this code or give the path to psychopy in the preferences.
Classes:
ResolutionWR -- The class that runs the experiment.
See 'print ResolutionWR.__doc__' for simple class docs or help(ResolutionWR) for everything.
"""
import copy
import errno
import json
import math
import os
import random
import sys
import numpy as np
import psychopy
import template as template
# Things you probably want to change
set_sizes = [1, 2, 4, 6]
trials_per_set_size = 5 # per block
number_of_blocks = 2
iti_time = 1
sample_time = 2
delay_time = 1
monitor_distance = 90
experiment_name = 'ResolutionWR'
data_directory = os.path.join(
os.path.expanduser('~'), 'Desktop', experiment_name, 'Data')
instruct_text = [
('Welcome to the experiment. Press space to begin.'),
('In this experiment you will be remembering colors.\n\n'
'Each trial will start with a blank screen.\n'
'Then, a number of circles with different colors will appear.\n'
'Remember as many colors as you can.\n\n'
'After a short delay, color wheels will appear.\n\n'
'Match the color wheel to the color that appeared in that position.\n'
'Click the mouse button until the wheel disappears.\n'
'If you are not sure, just take your best guess.\n\n'
'You will get breaks in between blocks.\n\n'
'Press space to start.'),
]
# Things you probably don't need to change, but can if you want to
colorwheel_path = 'colors.json'
distance_from_fixation = 6 # visual degrees
stim_size = 1.5 # visual degrees
min_color_dist = 25 # should be > 360 / max(set_sizes)
data_fields = [
'Subject',
'Session',
'Block',
'Trial',
'LocationNumber',
'ClickNumber',
'Timestamp',
'SetSize',
'LocationX',
'LocationY',
'ColorIndex',
'TrueColor',
'RespColor',
'Error',
'RT',
]
gender_options = [
'Male',
'Female',
'Other/Choose Not To Respond',
]
hispanic_options = [
'Yes, Hispanic or Latino/a',
'No, not Hispanic or Latino/a',
'Choose Not To Respond',
]
race_options = [
'American Indian or Alaskan Native',
'Asian',
'Pacific Islander',
'Black or African American',
'White / Caucasian',
'More Than One Race',
'Choose Not To Respond',
]
# Add additional questions here
questionaire_dict = {
'Session': 1,
'Age': 0,
'Gender': gender_options,
'Hispanic:': hispanic_options,
'Race': race_options,
}
# This is the logic that runs the experiment
# Change anything below this comment at your own risk
psychopy.logging.console.setLevel(psychopy.logging.CRITICAL) # Avoid error output
class ResolutionWR(template.BaseExperiment):
"""
The class that runs the whole report estimation experiment.
Parameters:
colorwheel_path -- A string or Path describing the location of a json file containing
a 360 length array of length 3 rgb arrays.
data_directory -- Where the data should be saved.
delay_time -- The number of seconds between the stimuli display and test.
distance_from_fixation -- A number describing how far from fixation stimuli will
appear in visual degrees.
instruct_text -- The text to be displayed to the participant at the beginning of the experiment.
iti_time -- The number of seconds in between a response and the next trial.
min_color_dist -- The minimum number of degrees in color space between display items.
number_of_blocks -- The number of blocks in the experiment.
questionaire_dict -- Questions to be included in the dialog.
sample_time -- The number of seconds the stimuli are on the screen for.
set_sizes -- A list of all the set sizes.
An equal number of trials will be shown for each set size.
stim_size -- The size of the stimuli in visual degrees.
trials_per_set_size -- The number of trials per set size per block.
Methods:
calculate_locations -- Calculates locations for the upcoming trial with random jitter.
calculate_error -- Calculates error in a response compared to the true color value.
chdir -- Changes the directory to where the data will be saved.
display_blank -- Displays a blank screen.
display_break -- Displays a screen during the break between blocks.
display_stimuli -- Displays the stimuli.
draw_color_wheels -- Draws color wheels at stimuli locations with random rotation.
generate_color_indexes -- Generates colors for a trial given the minimum distance.
get_response -- Manages getting responses for all color wheels.
make_block -- Creates a list of trials to be run.
make_trial -- Creates a single trial dictionary.
run -- Runs the entire experiment including optional hooks.
run_trial -- Runs a single trial.
send_data -- Updates the experiment data with the information from the last trial.
"""
def __init__(self, set_sizes=set_sizes, trials_per_set_size=trials_per_set_size,
number_of_blocks=number_of_blocks, distance_from_fixation=distance_from_fixation,
min_color_dist=min_color_dist, colorwheel_path=colorwheel_path, stim_size=stim_size,
iti_time=iti_time, sample_time=sample_time, delay_time=delay_time,
data_directory=data_directory, questionaire_dict=questionaire_dict,
instruct_text=instruct_text, **kwargs):
self.set_sizes = set_sizes
self.trials_per_set_size = trials_per_set_size
self.number_of_blocks = number_of_blocks
self.distance_from_fixation = distance_from_fixation
self.stim_size = stim_size
self.questionaire_dict = questionaire_dict
self.data_directory = data_directory
self.instruct_text = instruct_text
self.min_color_dist = min_color_dist
self.iti_time = iti_time
self.sample_time = sample_time
self.delay_time = delay_time
self.color_wheel = self._load_color_wheel(colorwheel_path)
self.mouse = None
super().__init__(**kwargs)
def save_experiment_info(self, filename=None):
"""Writes the info from the dialog box to a json file.
This method overwrites the base method in order to include the session number in the filename.
Parameters:
filename -- a string defining the filename with no extension
"""
ext = '.json'
if filename is None:
filename = (self.experiment_name + '_' +
self.experiment_info['Subject Number'].zfill(3) + '_' +
str(self.experiment_info['Session']).zfill(3) +
'_info')
elif filename[-5:] == ext:
filename = filename[:-5]
if os.path.isfile(filename + ext):
if self.overwrite_ok is None:
self.overwrite_ok = self._confirm_overwrite()
if not self.overwrite_ok:
# If the file exists make a new filename
i = 1
new_filename = filename + '(' + str(i) + ')'
while os.path.isfile(new_filename + ext):
i += 1
new_filename = filename + '(' + str(i) + ')'
filename = new_filename
filename = filename + ext
with open(filename, 'w') as info_file:
info_file.write(json.dumps(self.experiment_info))
def open_csv_data_file(self, data_filename=None):
"""Opens the csv file and writes the header.
This method overwrites the base method in order to include the session number in the filename.
Parameters:
data_filename -- name of the csv file with no extension
(defaults to experimentname_subjectnumber).
"""
if data_filename is None:
data_filename = (self.experiment_name + '_' +
self.experiment_info['Subject Number'].zfill(3) + '_' +
str(self.experiment_info['Session']).zfill(3))
elif data_filename[-4:] == '.csv':
data_filename = data_filename[:-4]
if os.path.isfile(data_filename + '.csv'):
if self.overwrite_ok is None:
self.overwrite_ok = self._confirm_overwrite()
if not self.overwrite_ok:
# If the file exists and we can't overwrite make a new filename
i = 1
new_filename = data_filename + '(' + str(i) + ')'
while os.path.isfile(new_filename + '.csv'):
i += 1
new_filename = data_filename + '(' + str(i) + ')'
data_filename = new_filename
self.experiment_data_filename = data_filename + '.csv'
# Write the header
with open(self.experiment_data_filename, 'w+') as data_file:
for field in self.data_fields:
data_file.write('"')
data_file.write(field)
data_file.write('"')
if field != self.data_fields[-1]:
data_file.write(',')
data_file.write('\n')
def chdir(self):
"""Changes the directory to where the data will be saved."""
try:
os.makedirs(self.data_directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
os.chdir(self.data_directory)
def _load_color_wheel(self, path):
"""
Loads the json color wheel file.
Parameters:
path -- Str or Path of the json file.
"""
with open(path) as f:
color_wheel = json.load(f)
color_wheel = [template.convert_color_value(i) for i in color_wheel]
return np.array(color_wheel)
def calculate_locations(self, set_size):
"""
Calculates locations for the upcoming trial with random jitter.
Parameters:
set_size -- The number of locations to return.
"""
angle_dist = 360 / set_size
        rotation = random.randint(0, int(angle_dist) - 1)
angles = [int(i * angle_dist + rotation + random.randint(-5, 5)) for i in range(set_size)]
locations = [(self.distance_from_fixation * math.cos(math.radians(i)),
self.distance_from_fixation * math.sin(math.radians(i)))
for i in angles]
return locations
def _check_dist(self, attempt, colors):
"""
Checks if a color attempt statistfies the distance condition.
Parameters:
attempt -- The color index to be checked.
colors -- The list of color indexes to be checked against.
"""
for c in colors:
raw_dist = abs(c - attempt)
dist = min(raw_dist, 360 - raw_dist)
if dist < self.min_color_dist:
return False
return True
def generate_color_indexes(self, set_size):
"""
Generates colors for a trial given the minimum distance.
Parameters:
set_size -- The number of colors to generate.
"""
colors = []
while len(colors) < set_size:
attempt = random.randint(0, 359)
if self._check_dist(attempt, colors):
colors.append(attempt)
return colors
def make_trial(self, set_size):
"""
Creates a single trial dictionary.
Parameters:
set_size -- The number of items to be displayed.
"""
color_indexes = self.generate_color_indexes(set_size)
color_values = [self.color_wheel[i] for i in color_indexes]
wheel_rotations = [random.randint(0, 359) for _ in range(set_size)]
locations = self.calculate_locations(set_size)
trial = {
'set_size': set_size,
'color_indexes': color_indexes,
'color_values': color_values,
'wheel_rotations': wheel_rotations,
'locations': locations
}
return trial
def make_block(self):
"""Makes a block of trials.
Returns a shuffled list of trials created by self.make_trial.
"""
trial_list = []
for set_size in self.set_sizes:
for _ in range(self.trials_per_set_size):
trial = self.make_trial(set_size)
trial_list.append(trial)
random.shuffle(trial_list)
return trial_list
def display_blank(self, wait_time):
"""
Displays a blank screen.
Parameters:
wait_time -- The number of seconds to display the blank for.
"""
self.experiment_window.flip()
psychopy.core.wait(wait_time)
def display_stimuli(self, coordinates, colors):
"""
Displays the stimuli.
Parameters:
coordinates -- A list of (x, y) tuples in visual degrees.
colors -- A list of -1 to 1 rgb color lists
"""
for pos, color in zip(coordinates, colors):
psychopy.visual.Circle(
self.experiment_window, radius=self.stim_size, pos=pos, fillColor=color,
units='deg', lineColor=None).draw()
self.experiment_window.flip()
psychopy.core.wait(self.sample_time)
def draw_color_wheels(self, coordinates, wheel_rotations):
"""
Draws color wheels at stimuli locations with random rotation.
Parameters:
coordinates -- A list of (x, y) tuples
wheel_rotations -- A list of 0:359 ints describing how much each wheel
should be rotated.
"""
mask = np.zeros([100, 1])
mask[-30:] = 1
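        # only the outer 30% of the radial stimulus is unmasked, so each color wheel is drawn as a ring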
for pos, rot in zip(coordinates, wheel_rotations):
rotated_wheel = np.roll(self.color_wheel, rot, axis=0)
tex = np.repeat(rotated_wheel[np.newaxis, :, :], 360, 0)
psychopy.visual.RadialStim(
self.experiment_window, tex=tex, mask=mask, pos=pos, angularRes=256,
angularCycles=1, interpolate=False, size=self.stim_size * 2).draw()
def _calc_mouse_color(self, mouse_pos):
"""
Calculates the color of the pixel the mouse is hovering over.
Parameters:
mouse_pos -- A position returned by mouse.getPos()
"""
frame = np.array(self.experiment_window._getFrame()) # Uses psychopy internal function
x_correction = self.experiment_window.size[0] / 2
y_correction = self.experiment_window.size[1] / 2
x = int(psychopy.tools.monitorunittools.deg2pix(mouse_pos[0], self.experiment_monitor) + x_correction)
y = (self.experiment_window.size[1] -
int(psychopy.tools.monitorunittools.deg2pix(mouse_pos[1], self.experiment_monitor) + y_correction))
try:
color = frame[y, x, :]
except IndexError:
color = None
return color
def _calc_mouse_position(self, coordinates, mouse_pos):
"""
Determines which position is closest to the mouse in order to display the hover preview.
Parameters:
coordinates -- A list of (x, y) tuples
mouse_pos -- A position returned by mouse.getPos()
"""
dists = [np.linalg.norm(np.array(i) - np.array(mouse_pos) / 2) for i in coordinates]
closest_dist = min(dists)
if closest_dist < 4:
return coordinates[np.argmin(dists)]
else:
return None
def _response_loop(self, coordinates, wheel_rotations):
"""
Handles the hover updating and response clicks
Slightly slow due to how psychopy handles clicks, so a full click and hold is needed.
Parameters:
coordinates -- A list of (x, y) tuples
wheel_rotations -- A list of 0:359 ints describing how much each wheel
should be rotated.
"""
temp_coordinates = copy.copy(coordinates)
temp_rotations = copy.copy(wheel_rotations)
resp_colors = [0] * len(coordinates)
rts = [0] * len(coordinates)
click_order = [0] * len(coordinates)
click = 1
self.mouse.clickReset()
self.draw_color_wheels(temp_coordinates, temp_rotations)
self.experiment_window.flip()
while True:
if psychopy.event.getKeys(keyList=['q']):
self.quit_experiment()
(lclick, _, _), (rt, _, _) = self.mouse.getPressed(getTime=True)
mouse_pos = self.mouse.getPos()
px_color = self._calc_mouse_color(mouse_pos)
if px_color is not None and not np.array_equal(px_color, np.array([128, 128, 128])):
preview_pos = self._calc_mouse_position(temp_coordinates, mouse_pos)
if preview_pos:
if lclick:
resp_colors[coordinates.index(preview_pos)] = px_color
rts[coordinates.index(preview_pos)] = rt
click_order[coordinates.index(preview_pos)] = click
click += 1
del temp_rotations[temp_coordinates.index(preview_pos)]
temp_coordinates.remove(preview_pos)
if not temp_coordinates:
return resp_colors, rts, click_order
else:
psychopy.visual.Circle(
self.experiment_window, radius=self.stim_size / 2, pos=preview_pos,
fillColor=template.convert_color_value(px_color), units='deg',
lineColor=None).draw()
self.draw_color_wheels(temp_coordinates, temp_rotations)
self.experiment_window.flip()
def get_response(self, coordinates, wheel_rotations):
"""
Manages getting responses for all color wheels.
Parameters:
coordinates -- A list of (x, y) tuples
wheel_rotations -- A list of 0:359 ints describing how much each wheel
should be rotated.
"""
if not self.mouse:
self.mouse = psychopy.event.Mouse(visible=False, win=self.experiment_window)
self.mouse.setVisible(1)
psychopy.event.clearEvents()
resp_colors, rts, click_order = self._response_loop(coordinates, wheel_rotations)
self.mouse.setVisible(0)
return resp_colors, rts, click_order
def calculate_error(self, color_index, resp_color):
"""
Calculates error in a response compared to the true color value.
Parameters:
color_index -- The index of the true color values (0:359).
resp_color -- The rgb color that was selected.
"""
row_index = np.where((self.color_wheel == resp_color).all(axis=1))[0]
if row_index.shape[0] < 1:
return None # if empty, return None
raw_error = row_index[0] - color_index
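        # wrap the signed error onto the circular color space so it always lies in [-180, 180]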
if raw_error >= -180 and raw_error <= 180:
error = raw_error
elif raw_error < -180:
error = 360 + raw_error
else:
error = raw_error - 360
return error
def send_data(self, data):
"""Updates the experiment data with the information from the last trial.
This function is seperated from run_trial to allow additional information to be added
afterwards.
Parameters:
data -- A dict where keys exist in data_fields and values are to be saved.
"""
self.update_experiment_data(data)
def run_trial(self, trial, block_num, trial_num):
"""
Runs a single trial.
Parameters:
trial -- A dictionary returned by make_trial().
block_num -- The block number to be saved in the output csv.
trial_num -- The trial number to be saved in the output csv.
"""
self.display_blank(self.iti_time)
self.display_stimuli(trial['locations'], trial['color_values'])
self.display_blank(self.delay_time)
resp_colors, rts, click_order = self.get_response(trial['locations'], trial['wheel_rotations'])
data = []
timestamp = psychopy.core.getAbsTime()
for i, (color, rt, click) in enumerate(zip(resp_colors, rts, click_order)):
data.append({
'Subject': self.experiment_info['Subject Number'],
'Session': self.experiment_info['Session'],
'Block': block_num,
'Trial': trial_num,
'LocationNumber': i + 1,
'ClickNumber': click,
'Timestamp': timestamp,
'SetSize': trial['set_size'],
'LocationX': trial['locations'][i][0],
'LocationY': trial['locations'][i][1],
'ColorIndex': trial['color_indexes'][i],
'TrueColor': trial['color_values'][i],
'RespColor': template.convert_color_value(color),
'Error': self.calculate_error(trial['color_indexes'][i], template.convert_color_value(color)),
'RT': rt,
})
return data
def display_break(self):
"""Displays a break screen in between blocks."""
break_text = 'Please take a short break. Press space to continue.'
self.display_text_screen(text=break_text, bg_color=[204, 255, 204])
def run(self, setup_hook=None, before_first_trial_hook=None, pre_block_hook=None,
pre_trial_hook=None, post_trial_hook=None, post_block_hook=None,
end_experiment_hook=None):
"""Runs the entire experiment.
This function takes a number of hooks that allow you to alter behavior of the experiment
without having to completely rewrite the run function. While large changes will still
require you to create a subclass, small changes like adding a practice block or
performance feedback screen can be implimented using these hooks. All hooks take in the
experiment object as the first argument. See below for other parameters sent to hooks.
Parameters:
setup_hook -- takes self, executed once the window is open.
before_first_trial_hook -- takes self, executed after instructions are displayed.
pre_block_hook -- takes self, block list, and block num
Executed immediately before block start.
Can optionally return an altered block list.
pre_trial_hook -- takes self, trial dict, block num, and trial num
Executed immediately before trial start.
Can optionally return an altered trial dict.
post_trial_hook -- takes self and the trial data, executed immediately after trial end.
Can optionally return altered trial data to be stored.
post_block_hook -- takes self, executed at end of block before break screen (including
last block).
end_experiment_hook -- takes self, executed immediately before end experiment screen.
"""
self.chdir()
ok = self.get_experiment_info_from_dialog(self.questionaire_dict)
if not ok:
print('Experiment has been terminated.')
sys.exit(1)
self.save_experiment_info()
self.open_csv_data_file()
self.open_window(screen=0)
self.display_text_screen('Loading...', wait_for_input=False)
if setup_hook is not None:
setup_hook(self)
for instruction in self.instruct_text:
self.display_text_screen(text=instruction)
if before_first_trial_hook is not None:
before_first_trial_hook(self)
for block_num in range(self.number_of_blocks):
block = self.make_block()
if pre_block_hook is not None:
tmp = pre_block_hook(self, block, block_num)
if tmp is not None:
block = tmp
for trial_num, trial in enumerate(block):
if pre_trial_hook is not None:
tmp = pre_trial_hook(self, trial, block_num, trial_num)
if tmp is not None:
trial = tmp
data = self.run_trial(trial, block_num, trial_num)
if post_trial_hook is not None:
tmp = post_trial_hook(self, data)
if tmp is not None:
data = tmp
self.send_data(data)
self.save_data_to_csv()
if post_block_hook is not None:
post_block_hook(self)
if block_num + 1 != self.number_of_blocks:
self.display_break()
if end_experiment_hook is not None:
end_experiment_hook(self)
self.display_text_screen(
'The experiment is now over, please get your experimenter.',
bg_color=[0, 0, 255], text_color=[255, 255, 255])
self.quit_experiment()
# If you call this script directly, the task will run with your defaults
if __name__ == '__main__':
exp = ResolutionWR(
# BaseExperiment parameters
experiment_name=experiment_name,
data_fields=data_fields,
monitor_distance=monitor_distance,
# Custom parameters go here
)
exp.run()
| 35.705075
| 112
| 0.618195
|
4a1950c06395988bec3e4292af4b31453c8589df
| 6,450
|
py
|
Python
|
ATT_train_and_test_comments.py
|
tanyakhandelwal21/DefectDetectionModels
|
b8404a5a1f6876fc402ce37812c084c0c9aca883
|
[
"MIT"
] | null | null | null |
ATT_train_and_test_comments.py
|
tanyakhandelwal21/DefectDetectionModels
|
b8404a5a1f6876fc402ce37812c084c0c9aca883
|
[
"MIT"
] | null | null | null |
ATT_train_and_test_comments.py
|
tanyakhandelwal21/DefectDetectionModels
|
b8404a5a1f6876fc402ce37812c084c0c9aca883
|
[
"MIT"
] | null | null | null |
#import statements from ATT model
from collections import defaultdict
import re
from bs4 import BeautifulSoup
import sys
import os
from bs4 import BeautifulSoup
import sys
import os
os.environ['KERAS_BACKEND']='tensorflow'
from tensorflow.keras.preprocessing.text import Tokenizer, text_to_word_sequence
from tensorflow.keras import utils
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import Dense, Input, Flatten
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Embedding, Concatenate, Dropout, LSTM, GRU, Bidirectional, TimeDistributed
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Attention
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer, InputSpec
from tensorflow.keras import initializers
#import from OGmodel
import pickle
import pandas as pd
import numpy as np
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.utils import Sequence
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
MAX_SENT_LENGTH = 100
MAX_NB_WORDS = 20000
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.2
with open('data/y_train.pickle', 'rb') as handle:
Y_train = pickle.load(handle)
with open('data/y_test.pickle', 'rb') as handle:
Y_test = pickle.load(handle)
with open('data/y_valid.pickle', 'rb') as handle:
Y_valid = pickle.load(handle)
with open('data/x_train.pickle', 'rb') as handle:
X_train = pickle.load(handle)
with open('data/x_test.pickle', 'rb') as handle:
X_test = pickle.load(handle)
with open('data/x_valid.pickle', 'rb') as handle:
X_valid = pickle.load(handle)
with open('data/vocab_set.pickle', 'rb') as handle:
vocabulary_set = pickle.load(handle)
X_train = X_train[:50000]
Y_train = Y_train[:50000]
X_test = X_test[:25000]
Y_test = Y_test[:25000]
X_valid = X_valid[:25000]
Y_valid = Y_valid[:25000]
# Encode training, valid and test instances
encoder = tfds.features.text.TokenTextEncoder(vocabulary_set)
# Model Definition
class AttLayer(Layer):
def __init__(self, attention_dim):
self.init = initializers.get('normal')
self.supports_masking = True
self.attention_dim = attention_dim
super(AttLayer, self).__init__()
def build(self, input_shape):
assert len(input_shape) == 3
self.W = K.variable(self.init((input_shape[-1], self.attention_dim)), name='W')
self.b = K.variable(self.init((self.attention_dim, )), name='b')
self.u = K.variable(self.init((self.attention_dim, 1)), name='u')
self.tw = [self.W, self.b, self.u]
super(AttLayer, self).build(input_shape)
def compute_mask(self, inputs, mask=None):
return None
def call(self, x, mask=None):
# size of x :[batch_size, sel_len, attention_dim]
# size of u :[batch_size, attention_dim]
# uit = tanh(xW+b)
uit = K.tanh(K.bias_add(K.dot(x, self.W), self.b))
ait = K.dot(uit, self.u)
ait = K.squeeze(ait, -1)
ait = K.exp(ait)
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
ait *= K.cast(mask, K.floatx())
ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx())
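        # ait now holds softmax-normalized attention weights over the sequence dimension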
ait = K.expand_dims(ait)
weighted_input = x * ait
output = K.sum(weighted_input, axis=1)
return output
def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[-1])
inputs = Input(shape=(None,), dtype='int32')
embedding = Embedding(encoder.vocab_size, 128)(inputs)
l_lstm = Bidirectional(LSTM(64, return_sequences=True))(embedding)
att = Attention()([embedding,l_lstm])
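# NOTE: the Attention output above is not connected to the layers below; preds is built from a second BiLSTM applied directly to the embedding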
l_lstm2 = Bidirectional(LSTM(64))(embedding)
preds = Dense(1, activation='sigmoid')(l_lstm2)
model = Model(inputs, preds)
#model = tf.keras.Sequential([
# tf.keras.layers.Embedding(encoder.vocab_size, 64),
# tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, return_sequences=True)),
# tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
# tf.keras.layers.Dense(64, activation='relu'),
# tf.keras.layers.Dropout(0.5),
# tf.keras.layers.Dense(1, activation='sigmoid')
#])
model.compile(loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(1e-4),
metrics=['acc'])
model.summary()
batch_size = 16
# Building generators
class CustomGenerator(Sequence):
def __init__(self, text, labels, batch_size, num_steps=None):
self.text, self.labels = text, labels
self.batch_size = batch_size
self.len = np.ceil(len(self.text) / float(self.batch_size)).astype(np.int64)
if num_steps:
self.len = min(num_steps, self.len)
def __len__(self):
return self.len
def __getitem__(self, idx):
batch_x = self.text[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.labels[idx * self.batch_size:(idx + 1) * self.batch_size]
return batch_x, batch_y
train_gen = CustomGenerator(X_train, Y_train, batch_size)
valid_gen = CustomGenerator(X_valid, Y_valid, batch_size)
test_gen = CustomGenerator(X_test, Y_test, batch_size)
# Training the model
checkpointer = ModelCheckpoint('data/models/model-{epoch:02d}-{val_loss:.5f}.hdf5',
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='min')
callback_list = [checkpointer] #, , reduce_lr
his1 = model.fit_generator(
generator=train_gen,
epochs=1,
validation_data=valid_gen)
predIdxs = model.predict_generator(test_gen, verbose=1)
fpr, tpr, _ = roc_curve(Y_test, predIdxs)
roc_auc = auc(fpr, tpr)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('auc_model.png')
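# Optional follow-up (a sketch, not part of the original script): threshold the
# predicted probabilities at 0.5 and report standard classification metrics
# alongside the ROC curve; scikit-learn is assumed available since roc_curve and
# auc above come from sklearn.metrics.
from sklearn.metrics import classification_report
pred_labels = (predIdxs.ravel() >= 0.5).astype(int)
print(classification_report(Y_test, pred_labels, digits=4))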
| 32.41206
| 132
| 0.67876
|
4a1951e9e58a63103c1b73625a0b80322c35681f
| 1,533
|
py
|
Python
|
Course_3/Week_03/edit_distance.py
|
KnightZhang625/Stanford_Algorithm
|
7dacbbfa50e7b0e8380cf500df24af60cb9f42df
|
[
"Apache-2.0"
] | null | null | null |
Course_3/Week_03/edit_distance.py
|
KnightZhang625/Stanford_Algorithm
|
7dacbbfa50e7b0e8380cf500df24af60cb9f42df
|
[
"Apache-2.0"
] | 1
|
2020-07-16T08:03:22.000Z
|
2020-07-16T08:09:34.000Z
|
Course_3/Week_03/edit_distance.py
|
KnightZhang625/Stanford_Algorithm
|
7dacbbfa50e7b0e8380cf500df24af60cb9f42df
|
[
"Apache-2.0"
] | null | null | null |
# coding:utf-8
def calculateDistance(str_a, str_b):
    """Compute the Levenshtein distance between str_a and str_b and return
    (distance, edit_path), where edit_path lists the operations that turn
    str_a into str_b."""
    length_a = len(str_a)
    length_b = len(str_b)
    distance = [[0 for _ in range(length_b+1)] for _ in range(length_a+1)]
    backtrace = [[None for _ in range(length_b+1)] for _ in range(length_a+1)]
    for i in range(length_a+1):
        for j in range(length_b+1):
            if i == 0:
                # Building str_b[:j] from the empty prefix takes j insertions.
                distance[i][j] = j
                if j > 0:
                    backtrace[i][j] = ((i, j-1), 'insert {}'.format(str_b[j-1]))
            elif j == 0:
                # Erasing str_a[:i] takes i deletions.
                distance[i][j] = i
                backtrace[i][j] = ((i-1, j), 'delete {}'.format(str_a[i-1]))
            else:
                if str_a[i-1] == str_b[j-1]:
                    distance[i][j] = distance[i-1][j-1]
                    backtrace[i][j] = ((i-1, j-1), 'no action')
                else:
                    insert_dist = distance[i][j-1]
                    delete_dist = distance[i-1][j]
                    replace_dist = distance[i-1][j-1]
                    dist_cands = [insert_dist, delete_dist, replace_dist]
                    choose_dist = min(dist_cands)
                    select = dist_cands.index(choose_dist)
                    distance[i][j] = choose_dist + 1
                    if select == 0:
                        backtrace[i][j] = ((i, j-1), 'insert {}'.format(str_b[j-1]))
                    elif select == 1:
                        backtrace[i][j] = ((i-1, j), 'delete {}'.format(str_a[i-1]))
                    else:
                        backtrace[i][j] = ((i-1, j-1), 'replace {} with {}'.format(str_a[i-1], str_b[j-1]))
    # Walk the backtrace from the bottom-right cell to recover the edit path.
    p_prev = backtrace[-1][-1]
    path = []
    while p_prev is not None:
        path.append(p_prev[1])
        i, j = p_prev[0][0], p_prev[0][1]
        p_prev = backtrace[i][j]
    return distance[-1][-1], list(reversed(path))

if __name__ == '__main__':
    dist, path = calculateDistance('ABC', 'ACD')
    print(dist)
    print(path)
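
# Quick sanity check (illustrative, not part of the original file): compare the
# distance returned above against an independent two-row DP implementation.
# The helper name `_levenshtein` is introduced here purely for this check.
def _levenshtein(a, b):
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1,                # delete ca
                           cur[j - 1] + 1,             # insert cb
                           prev[j - 1] + (ca != cb)))  # substitute (or match)
        prev = cur
    return prev[-1]


if __name__ == '__main__':
    for a, b in [('ABC', 'ACD'), ('', 'AB'), ('kitten', 'sitting')]:
        assert calculateDistance(a, b)[0] == _levenshtein(a, b)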
| 32.617021
| 90
| 0.553164
|
4a1953a12321b2958a023c841a29429ee9e156af
| 547
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/scatter/marker/colorbar/_tick0.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/validators/scatter/marker/colorbar/_tick0.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/validators/scatter/marker/colorbar/_tick0.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import _plotly_utils.basevalidators
class Tick0Validator(_plotly_utils.basevalidators.AnyValidator):
def __init__(
self, plotly_name="tick0", parent_name="scatter.marker.colorbar", **kwargs
):
super(Tick0Validator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
implied_edits=kwargs.pop("implied_edits", {"tickmode": "linear"}),
role=kwargs.pop("role", "style"),
**kwargs
)
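
# Illustrative usage (a sketch, not part of the generated module): plotly's
# validators are normally exercised indirectly when a figure property such as
# marker.colorbar.tick0 is assigned; validate_coerce is the entry point the
# figure machinery calls on a validator instance.
if __name__ == "__main__":
    v = Tick0Validator()
    print(v.validate_coerce(0.5))  # AnyValidator passes arbitrary values through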
| 34.1875
| 82
| 0.636197
|