from great_expectations.render.renderer.content_block.content_block import (
ContentBlockRenderer,
)
from great_expectations.render.types import (
RenderedBulletListContent,
RenderedStringTemplateContent,
)
class ExceptionListContentBlockRenderer(ContentBlockRenderer):
"""Render a bullet list of exception messages raised for provided EVRs"""
_rendered_component_type = RenderedBulletListContent
_content_block_type = "bullet_list"
_default_header = 'Failed expectations <span class="mr-3 triangle"></span>'
_default_content_block_styling = {
"classes": ["col-12"],
"styles": {"margin-top": "20px"},
"header": {
"classes": ["collapsed"],
"attributes": {
"data-toggle": "collapse",
"href": "#{{content_block_id}}-body",
"role": "button",
"aria-expanded": "true",
"aria-controls": "collapseExample",
},
"styles": {
"cursor": "pointer",
},
},
"body": {
"classes": ["list-group", "collapse"],
},
}
_default_element_styling = {
"classes": [
"list-group-item"
], # "d-flex", "justify-content-between", "align-items-center"],
"params": {
"column": {"classes": ["badge", "badge-primary"]},
"expectation_type": {"classes": ["text-monospace"]},
"exception_message": {"classes": ["text-monospace"]},
},
}
@classmethod
def render(cls, render_object, **kwargs):
return super().render(
render_object=render_object, exception_list_content_block=True
)
@classmethod
def _missing_content_block_fn(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
# Only render EVR objects for which an exception was raised
if result.exception_info["raised_exception"] is True:
template_str = "$expectation_type raised an exception: $exception_message"
if include_column_name:
template_str = f"$column: {template_str}"
try:
column = result.expectation_config.kwargs["column"]
except KeyError:
column = None
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": {
"column": column,
"expectation_type": result.expectation_config.expectation_type,
"exception_message": result.exception_info[
"exception_message"
],
},
"styling": styling,
},
}
)
]
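# Hedged usage sketch (not part of the original module): given a list of
# validation results (EVRs) from a Great Expectations run, the renderer can be
# invoked directly via the classmethod defined above. The `validation_results`
# name below is an illustrative assumption.
#
#   evrs = validation_results.results  # hypothetical list of EVR objects
#   bullet_list = ExceptionListContentBlockRenderer.render(evrs)
#   print(bullet_list.to_json_dict())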
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import threading
import base64
from functools import partial
import smtplib
import imaplib
import email
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.encoders import encode_base64
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtGui as QtGui
from PyQt5.QtWidgets import (QVBoxLayout, QLabel, QGridLayout, QLineEdit, QInputDialog)
from electrum_dash.plugins import BasePlugin, hook
from electrum_dash.paymentrequest import PaymentRequest
from electrum_dash.i18n import _
from electrum_dash_gui.qt.util import EnterButton, Buttons, CloseButton
from electrum_dash_gui.qt.util import OkButton, WindowModalDialog
class Processor(threading.Thread):
polling_interval = 5*60
def __init__(self, imap_server, username, password, callback):
threading.Thread.__init__(self)
self.daemon = True
self.username = username
self.password = password
self.imap_server = imap_server
self.on_receive = callback
def poll(self):
try:
self.M.select()
except:
return
typ, data = self.M.search(None, 'ALL')
for num in data[0].split():
typ, msg_data = self.M.fetch(num, '(RFC822)')
msg = email.message_from_string(msg_data[0][1])
p = msg.get_payload()
if not msg.is_multipart():
p = [p]
continue
for item in p:
if item.get_content_type() == "application/dash-paymentrequest":
pr_str = item.get_payload()
pr_str = base64.b64decode(pr_str)
self.on_receive(pr_str)
def run(self):
self.M = imaplib.IMAP4_SSL(self.imap_server)
self.M.login(self.username, self.password)
while True:
self.poll()
time.sleep(self.polling_interval)
self.M.close()
self.M.logout()
def send(self, recipient, message, payment_request):
msg = MIMEMultipart()
msg['Subject'] = message
msg['To'] = recipient
msg['From'] = self.username
part = MIMEBase('application', "dash-paymentrequest")
part.set_payload(payment_request)
encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="payreq.dash"')
msg.attach(part)
s = smtplib.SMTP_SSL(self.imap_server, timeout=2)
s.login(self.username, self.password)
s.sendmail(self.username, [recipient], msg.as_string())
s.quit()
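# Hedged usage sketch (illustrative only, not part of the plugin): the
# Processor can be exercised on its own by wiring up a simple callback. The
# server name and credentials below are placeholders.
#
#   def on_pr(pr_str):
#       print('received %d-byte payment request' % len(pr_str))
#
#   p = Processor('imap.example.com', 'user', 'secret', on_pr)
#   p.start()  # polls the mailbox every `polling_interval` seconds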
class QEmailSignalObject(QObject):
email_new_invoice_signal = pyqtSignal()
class Plugin(BasePlugin):
def fullname(self):
return 'Email'
def description(self):
return _("Send and receive payment requests via email")
def is_available(self):
return True
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.imap_server = self.config.get('email_server', '')
self.username = self.config.get('email_username', '')
self.password = self.config.get('email_password', '')
if self.imap_server and self.username and self.password:
self.processor = Processor(self.imap_server, self.username, self.password, self.on_receive)
self.processor.start()
self.obj = QEmailSignalObject()
self.obj.email_new_invoice_signal.connect(self.new_invoice)
def on_receive(self, pr_str):
self.print_error('received payment request')
self.pr = PaymentRequest(pr_str)
self.obj.email_new_invoice_signal.emit()
def new_invoice(self):
self.parent.invoices.add(self.pr)
#window.update_invoices_list()
@hook
def receive_list_menu(self, menu, addr):
window = menu.parentWidget()
menu.addAction(_("Send via e-mail"), lambda: self.send(window, addr))
def send(self, window, addr):
from electrum_dash import paymentrequest
r = window.wallet.receive_requests.get(addr)
message = r.get('memo', '')
if r.get('signature'):
pr = paymentrequest.serialize_request(r)
else:
pr = paymentrequest.make_request(self.config, r)
if not pr:
return
        recipient, ok = QInputDialog.getText(window, 'Send request', 'Email invoice to:')
if not ok:
return
recipient = str(recipient)
payload = pr.SerializeToString()
self.print_error('sending mail to', recipient)
try:
self.processor.send(recipient, message, payload)
except BaseException as e:
window.show_message(str(e))
return
window.show_message(_('Request sent.'))
def requires_settings(self):
return True
def settings_widget(self, window):
return EnterButton(_('Settings'), partial(self.settings_dialog, window))
def settings_dialog(self, window):
d = WindowModalDialog(window, _("Email settings"))
d.setMinimumSize(500, 200)
vbox = QVBoxLayout(d)
        vbox.addWidget(QLabel(_('Server hosting your email account')))
grid = QGridLayout()
vbox.addLayout(grid)
grid.addWidget(QLabel('Server (IMAP)'), 0, 0)
server_e = QLineEdit()
server_e.setText(self.imap_server)
grid.addWidget(server_e, 0, 1)
grid.addWidget(QLabel('Username'), 1, 0)
username_e = QLineEdit()
username_e.setText(self.username)
grid.addWidget(username_e, 1, 1)
grid.addWidget(QLabel('Password'), 2, 0)
password_e = QLineEdit()
password_e.setText(self.password)
grid.addWidget(password_e, 2, 1)
vbox.addStretch()
vbox.addLayout(Buttons(CloseButton(d), OkButton(d)))
if not d.exec_():
return
server = str(server_e.text())
self.config.set_key('email_server', server)
username = str(username_e.text())
self.config.set_key('email_username', username)
password = str(password_e.text())
self.config.set_key('email_password', password)
|
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
from __future__ import absolute_import
from mock import patch
import pytest
from module_build_service import app
from module_build_service.common import models
from module_build_service.common.models import BUILD_STATES, ModuleBuild
from module_build_service.manage import manager_wrapper, retire
from module_build_service.scheduler.db_session import db_session
from module_build_service.web.utils import deps_to_dict
from tests import clean_database, staged_data_filename
@pytest.mark.usefixtures("model_tests_init_data")
class TestMBSManage:
@pytest.mark.parametrize(
("identifier", "is_valid"),
(
("", False),
("spam", False),
("spam:bacon", True),
("spam:bacon:eggs", True),
("spam:bacon:eggs:ham", True),
("spam:bacon:eggs:ham:sausage", False),
),
)
def test_retire_identifier_validation(self, identifier, is_valid):
if is_valid:
retire(identifier)
else:
with pytest.raises(ValueError):
retire(identifier)
@pytest.mark.parametrize(
("overrides", "identifier", "changed_count"),
(
({"name": "pickme"}, "pickme:eggs", 1),
({"stream": "pickme"}, "spam:pickme", 1),
({"version": "pickme"}, "spam:eggs:pickme", 1),
({"context": "pickme"}, "spam:eggs:ham:pickme", 1),
({}, "spam:eggs", 3),
({"version": "pickme"}, "spam:eggs", 3),
({"context": "pickme"}, "spam:eggs:ham", 3),
),
)
@patch("module_build_service.manage.prompt_bool")
def test_retire_build(self, prompt_bool, overrides, identifier, changed_count):
prompt_bool.return_value = True
module_builds = (
db_session.query(ModuleBuild)
.filter_by(state=BUILD_STATES["ready"])
.order_by(ModuleBuild.id.desc())
.all()
)
        # Verify our assumption about the number of ModuleBuilds in the database
assert len(module_builds) == 3
for x, build in enumerate(module_builds):
build.name = "spam"
build.stream = "eggs"
build.version = "ham"
build.context = str(x)
for attr, value in overrides.items():
setattr(module_builds[0], attr, value)
db_session.commit()
retire(identifier)
retired_module_builds = (
db_session.query(ModuleBuild)
.filter_by(state=BUILD_STATES["garbage"])
.order_by(ModuleBuild.id.desc())
.all()
)
assert len(retired_module_builds) == changed_count
for x in range(changed_count):
assert retired_module_builds[x].id == module_builds[x].id
assert retired_module_builds[x].state == BUILD_STATES["garbage"]
@pytest.mark.parametrize(
("confirm_prompt", "confirm_arg", "confirm_expected"),
(
(True, False, True),
(True, True, True),
(False, False, False),
(False, True, True)
),
)
@patch("module_build_service.manage.prompt_bool")
def test_retire_build_confirm_prompt(
self, prompt_bool, confirm_prompt, confirm_arg, confirm_expected
):
prompt_bool.return_value = confirm_prompt
module_builds = db_session.query(ModuleBuild).filter_by(state=BUILD_STATES["ready"]).all()
        # Verify our assumption about the number of ModuleBuilds in the database
assert len(module_builds) == 3
for x, build in enumerate(module_builds):
build.name = "spam" + str(x) if x > 0 else "spam"
build.stream = "eggs"
db_session.commit()
retire("spam:eggs", confirm_arg)
retired_module_builds = (
db_session.query(ModuleBuild).filter_by(state=BUILD_STATES["garbage"]).all()
)
expected_changed_count = 1 if confirm_expected else 0
assert len(retired_module_builds) == expected_changed_count
class TestCommandBuildModuleLocally:
"""Test mbs-manager subcommand build_module_locally"""
def setup_method(self, test_method):
clean_database()
        # Do not allow flask_script to exit by itself, because we have to
        # assert something after the command finishes.
self.sys_exit_patcher = patch("sys.exit")
self.mock_sys_exit = self.sys_exit_patcher.start()
        # The consumer does not actually need to run, so it does not make
        # sense to publish a message after creating a module build.
self.publish_patcher = patch("module_build_service.common.messaging.publish")
self.mock_publish = self.publish_patcher.start()
        # Don't allow the conf.set_item call inside the command to actually modify conf
self.set_item_patcher = patch("module_build_service.manage.conf.set_item")
self.mock_set_item = self.set_item_patcher.start()
        # Avoid creating the local SQLite database for the command; it is not
        # needed for the tests run here.
self.create_all_patcher = patch("module_build_service.manage.db.create_all")
self.mock_create_all = self.create_all_patcher.start()
def teardown_method(self, test_method):
self.create_all_patcher.stop()
        self.set_item_patcher.stop()
self.publish_patcher.stop()
self.sys_exit_patcher.stop()
def _run_manager_wrapper(self, cli_cmd):
# build_module_locally changes database uri to a local SQLite database file.
# Restore the uri to original one in order to not impact the database
# session in subsequent tests.
original_db_uri = app.config["SQLALCHEMY_DATABASE_URI"]
try:
with patch("sys.argv", new=cli_cmd):
manager_wrapper()
finally:
app.config["SQLALCHEMY_DATABASE_URI"] = original_db_uri
@patch("module_build_service.scheduler.local.main")
def test_set_stream(self, main):
cli_cmd = [
"mbs-manager", "build_module_locally",
"--set-stream", "platform:f28",
"--file", staged_data_filename("testmodule-local-build.yaml")
]
self._run_manager_wrapper(cli_cmd)
        # Since module_build_service.scheduler.local.main is mocked, MBS does
        # not really build the testmodule for this test. The following lines
        # assert that module testmodule-local-build is expanded and stored in
        # the database, and that this build buildrequires platform:f28 and
        # requires platform:f28.
        # Note that f28 comes from the command line option --set-stream, which
        # is exactly what this test exercises.
builds = db_session.query(models.ModuleBuild).filter_by(
name="testmodule-local-build").all()
assert 1 == len(builds)
testmodule_build = builds[0]
mmd_deps = testmodule_build.mmd().get_dependencies()
deps_dict = deps_to_dict(mmd_deps[0], "buildtime")
assert ["f28"] == deps_dict["platform"]
deps_dict = deps_to_dict(mmd_deps[0], "runtime")
assert ["f28"] == deps_dict["platform"]
@patch("module_build_service.manage.logging")
def test_ambiguous_stream(self, logging):
cli_cmd = [
"mbs-manager", "build_module_locally",
"--file", staged_data_filename("testmodule-local-build.yaml")
]
self._run_manager_wrapper(cli_cmd)
args, _ = logging.error.call_args_list[0]
assert "There are multiple streams to choose from for module platform." == args[0]
args, _ = logging.error.call_args_list[1]
assert "Use '-s module_name:module_stream' to choose the stream" == args[0]
def test_module_build_failed(self):
cli_cmd = [
"mbs-manager", "build_module_locally",
"--set-stream", "platform:f28",
"--file", staged_data_filename("testmodule-local-build.yaml")
]
def main_side_effect(module_build_ids):
build = db_session.query(models.ModuleBuild).filter(
models.ModuleBuild.name == "testmodule-local-build"
).first()
build.state = models.BUILD_STATES["failed"]
db_session.commit()
        # We don't actually run the consumer, but it can be patched to mark a
        # module build as failed for test purposes.
with patch("module_build_service.scheduler.local.main",
side_effect=main_side_effect):
with pytest.raises(RuntimeError, match="Module build failed"):
self._run_manager_wrapper(cli_cmd)
|
# -*- test-case-name: twisted.test.test_factories -*-
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Standard implementations of Twisted protocol-related interfaces.
Start here if you are looking to write a new protocol implementation for
Twisted. The Protocol class contains some introductory material.
API Stability: stable, other than ClientCreator.
Maintainer: U{Itamar Shtull-Trauring<mailto:twisted@itamarst.org>}
"""
import random
from zope.interface import implements
# Twisted Imports
from twisted.python import log, failure, components
from twisted.internet import interfaces, error, defer
class Factory:
"""This is a factory which produces protocols.
By default, buildProtocol will create a protocol of the class given in
self.protocol.
"""
implements(interfaces.IProtocolFactory)
# put a subclass of Protocol here:
protocol = None
numPorts = 0
noisy = True
def doStart(self):
"""Make sure startFactory is called.
Users should not call this function themselves!
"""
if not self.numPorts:
if self.noisy:
log.msg("Starting factory %r" % self)
self.startFactory()
self.numPorts = self.numPorts + 1
def doStop(self):
"""Make sure stopFactory is called.
Users should not call this function themselves!
"""
if self.numPorts == 0:
# this shouldn't happen, but does sometimes and this is better
# than blowing up in assert as we did previously.
return
self.numPorts = self.numPorts - 1
if not self.numPorts:
if self.noisy:
log.msg("Stopping factory %r" % self)
self.stopFactory()
def startFactory(self):
"""This will be called before I begin listening on a Port or Connector.
It will only be called once, even if the factory is connected
to multiple ports.
This can be used to perform 'unserialization' tasks that
are best put off until things are actually running, such
as connecting to a database, opening files, etcetera.
"""
def stopFactory(self):
"""This will be called before I stop listening on all Ports/Connectors.
This can be overridden to perform 'shutdown' tasks such as disconnecting
database connections, closing files, etc.
It will be called, for example, before an application shuts down,
if it was connected to a port. User code should not call this function
directly.
"""
def buildProtocol(self, addr):
"""Create an instance of a subclass of Protocol.
        The returned instance will handle input on an incoming server
        connection, and will have an attribute \"factory\" pointing to the
        creating factory.
Override this method to alter how Protocol instances get created.
@param addr: an object implementing L{twisted.internet.interfaces.IAddress}
"""
p = self.protocol()
p.factory = self
return p
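# Hedged usage sketch (not part of the module): a server factory is normally
# declared by pointing `protocol` at a Protocol subclass (defined further down
# in this file) and handing an instance to a reactor listen call.
#
#   class EchoFactory(Factory):
#       protocol = Protocol  # substitute a concrete Protocol subclass
#
#   from twisted.internet import reactor
#   reactor.listenTCP(8000, EchoFactory())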
class ClientFactory(Factory):
"""A Protocol factory for clients.
This can be used together with the various connectXXX methods in
reactors.
"""
def startedConnecting(self, connector):
"""Called when a connection has been started.
You can call connector.stopConnecting() to stop the connection attempt.
@param connector: a Connector object.
"""
def clientConnectionFailed(self, connector, reason):
"""Called when a connection has failed to connect.
It may be useful to call connector.connect() - this will reconnect.
@type reason: L{twisted.python.failure.Failure}
"""
def clientConnectionLost(self, connector, reason):
"""Called when an established connection is lost.
It may be useful to call connector.connect() - this will reconnect.
@type reason: L{twisted.python.failure.Failure}
"""
class _InstanceFactory(ClientFactory):
"""Factory used by ClientCreator."""
noisy = False
def __init__(self, reactor, instance, deferred):
self.reactor = reactor
self.instance = instance
self.deferred = deferred
def __repr__(self):
return "<ClientCreator factory: %r>" % (self.instance, )
def buildProtocol(self, addr):
self.reactor.callLater(0, self.deferred.callback, self.instance)
del self.deferred
return self.instance
def clientConnectionFailed(self, connector, reason):
self.reactor.callLater(0, self.deferred.errback, reason)
del self.deferred
class ClientCreator:
"""Client connections that do not require a factory.
The various connect* methods create a protocol instance using the given
protocol class and arguments, and connect it, returning a Deferred of the
resulting protocol instance.
Useful for cases when we don't really need a factory. Mainly this
is when there is no shared state between protocol instances, and no need
to reconnect.
"""
def __init__(self, reactor, protocolClass, *args, **kwargs):
self.reactor = reactor
self.protocolClass = protocolClass
self.args = args
self.kwargs = kwargs
def connectTCP(self, host, port, timeout=30, bindAddress=None):
"""Connect to remote host, return Deferred of resulting protocol instance."""
d = defer.Deferred()
f = _InstanceFactory(self.reactor, self.protocolClass(*self.args, **self.kwargs), d)
self.reactor.connectTCP(host, port, f, timeout=timeout, bindAddress=bindAddress)
return d
def connectUNIX(self, address, timeout = 30, checkPID=0):
"""Connect to Unix socket, return Deferred of resulting protocol instance."""
d = defer.Deferred()
f = _InstanceFactory(self.reactor, self.protocolClass(*self.args, **self.kwargs), d)
self.reactor.connectUNIX(address, f, timeout = timeout, checkPID=checkPID)
return d
def connectSSL(self, host, port, contextFactory, timeout=30, bindAddress=None):
"""Connect to SSL server, return Deferred of resulting protocol instance."""
d = defer.Deferred()
f = _InstanceFactory(self.reactor, self.protocolClass(*self.args, **self.kwargs), d)
self.reactor.connectSSL(host, port, f, contextFactory, timeout=timeout, bindAddress=bindAddress)
return d
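# Hedged usage sketch (assumes a running reactor and a reachable host; the
# host name is a placeholder):
#
#   from twisted.internet import reactor
#   creator = ClientCreator(reactor, Protocol)
#   d = creator.connectTCP("example.com", 80)
#   d.addCallback(lambda proto: log.msg("connected: %r" % (proto,)))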
class ReconnectingClientFactory(ClientFactory):
"""My clients auto-reconnect with an exponential back-off.
Note that clients should call my resetDelay method after they have
connected successfully.
@ivar maxDelay: Maximum number of seconds between connection attempts.
@ivar initialDelay: Delay for the first reconnection attempt.
    @ivar factor: a multiplicative factor by which the delay grows
@ivar jitter: percentage of randomness to introduce into the delay length
to prevent stampeding.
"""
maxDelay = 3600
initialDelay = 1.0
# Note: These highly sensitive factors have been precisely measured by
# the National Institute of Science and Technology. Take extreme care
# in altering them, or you may damage your Internet!
factor = 2.7182818284590451 # (math.e)
# Phi = 1.6180339887498948 # (Phi is acceptable for use as a
# factor if e is too large for your application.)
    jitter = 0.11962656492 # molar Planck constant times c, joule meter/mole
delay = initialDelay
retries = 0
maxRetries = None
_callID = None
connector = None
continueTrying = 1
def clientConnectionFailed(self, connector, reason):
if self.continueTrying:
self.connector = connector
self.retry()
def clientConnectionLost(self, connector, unused_reason):
if self.continueTrying:
self.connector = connector
self.retry()
def retry(self, connector=None):
"""Have this connector connect again, after a suitable delay.
"""
if not self.continueTrying:
if self.noisy:
log.msg("Abandoning %s on explicit request" % (connector,))
return
if connector is None:
if self.connector is None:
raise ValueError("no connector to retry")
else:
connector = self.connector
self.retries += 1
if self.maxRetries is not None and (self.retries > self.maxRetries):
if self.noisy:
log.msg("Abandoning %s after %d retries." %
(connector, self.retries))
return
self.delay = min(self.delay * self.factor, self.maxDelay)
if self.jitter:
self.delay = random.normalvariate(self.delay,
self.delay * self.jitter)
if self.noisy:
log.msg("%s will retry in %d seconds" % (connector, self.delay,))
from twisted.internet import reactor
def reconnector():
self._callID = None
connector.connect()
self._callID = reactor.callLater(self.delay, reconnector)
def stopTrying(self):
"""I put a stop to any attempt to reconnect in progress.
"""
# ??? Is this function really stopFactory?
if self._callID:
self._callID.cancel()
self._callID = None
if self.connector:
# Hopefully this doesn't just make clientConnectionFailed
# retry again.
try:
self.connector.stopConnecting()
except error.NotConnectingError:
pass
self.continueTrying = 0
def resetDelay(self):
"""Call me after a successful connection to reset.
I reset the delay and the retry counter.
"""
self.delay = self.initialDelay
self.retries = 0
self._callID = None
self.continueTrying = 1
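# Hedged usage sketch: a minimal reconnecting client factory. resetDelay() is
# called once a connection succeeds so the back-off restarts from
# initialDelay, as the class docstring above recommends.
#
#   class PersistentClientFactory(ReconnectingClientFactory):
#       protocol = Protocol  # substitute a concrete Protocol subclass
#       def buildProtocol(self, addr):
#           self.resetDelay()
#           return ClientFactory.buildProtocol(self, addr)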
class ServerFactory(Factory):
"""Subclass this to indicate that your protocol.Factory is only usable for servers.
"""
class BaseProtocol:
"""This is the abstract superclass of all protocols.
If you are going to write a new protocol for Twisted, start here. The
docstrings of this class explain how you can get started. Any protocol
implementation, either client or server, should be a subclass of me.
My API is quite simple. Implement dataReceived(data) to handle both
event-based and synchronous input; output can be sent through the
'transport' attribute, which is to be an instance that implements
L{twisted.internet.interfaces.ITransport}.
Some subclasses exist already to help you write common types of protocols:
see the L{twisted.protocols.basic} module for a few of them.
"""
connected = 0
transport = None
def makeConnection(self, transport):
"""Make a connection to a transport and a server.
This sets the 'transport' attribute of this Protocol, and calls the
connectionMade() callback.
"""
self.connected = 1
self.transport = transport
self.connectionMade()
def connectionMade(self):
"""Called when a connection is made.
This may be considered the initializer of the protocol, because
it is called when the connection is completed. For clients,
this is called once the connection to the server has been
established; for servers, this is called after an accept() call
stops blocking and a socket has been received. If you need to
send any greeting or initial message, do it here.
"""
connectionDone=failure.Failure(error.ConnectionDone())
connectionDone.cleanFailure()
class Protocol(BaseProtocol):
implements(interfaces.IProtocol)
def dataReceived(self, data):
"""Called whenever data is received.
Use this method to translate to a higher-level message. Usually, some
callback will be made upon the receipt of each complete protocol
message.
@param data: a string of indeterminate length. Please keep in mind
that you will probably need to buffer some data, as partial
(or multiple) protocol messages may be received! I recommend
that unit tests for protocols call through to this method with
differing chunk sizes, down to one byte at a time.
"""
def connectionLost(self, reason=connectionDone):
"""Called when the connection is shut down.
Clear any circular references here, and any external references
to this Protocol. The connection has been closed.
@type reason: L{twisted.python.failure.Failure}
"""
class ProtocolToConsumerAdapter(components.Adapter):
"""
This class is unstable.
"""
implements(interfaces.IConsumer)
def write(self, data):
self.original.dataReceived(data)
def registerProducer(self, producer, streaming):
pass
def unregisterProducer(self):
pass
components.registerAdapter(ProtocolToConsumerAdapter, interfaces.IProtocol,
interfaces.IConsumer)
class ConsumerToProtocolAdapter(components.Adapter):
"""
This class is unstable.
"""
implements(interfaces.IProtocol)
def dataReceived(self, data):
self.original.write(data)
def connectionLost(self, reason):
pass
def makeConnection(self, transport):
pass
def connectionMade(self):
pass
components.registerAdapter(ConsumerToProtocolAdapter, interfaces.IConsumer,
interfaces.IProtocol)
class ProcessProtocol(BaseProtocol):
"""Processes have some additional methods besides receiving data.
"""
def childDataReceived(self, childFD, data):
if childFD == 1:
self.outReceived(data)
elif childFD == 2:
self.errReceived(data)
def outReceived(self, data):
"""Some data was received from stdout."""
def errReceived(self, data):
"""Some data was received from stderr."""
def childConnectionLost(self, childFD):
if childFD == 0:
self.inConnectionLost()
elif childFD == 1:
self.outConnectionLost()
elif childFD == 2:
self.errConnectionLost()
def inConnectionLost(self):
"""This will be called when stdin is closed."""
def outConnectionLost(self):
"""This will be called when stdout is closed."""
def errConnectionLost(self):
"""This will be called when stderr is closed."""
def processEnded(self, reason):
"""This will be called when the subprocess is finished.
@type reason: L{twisted.python.failure.Failure}
"""
class AbstractDatagramProtocol:
"""Abstract protocol for datagram-oriented transports, e.g. IP, ICMP, ARP, UDP."""
transport = None
numPorts = 0
noisy = True
def __getstate__(self):
d = self.__dict__.copy()
d['transport'] = None
return d
def doStart(self):
"""Make sure startProtocol is called.
This will be called by makeConnection(), users should not call it.
"""
if not self.numPorts:
if self.noisy:
log.msg("Starting protocol %s" % self)
self.startProtocol()
self.numPorts = self.numPorts + 1
def doStop(self):
"""Make sure stopProtocol is called.
This will be called by the port, users should not call it.
"""
assert self.numPorts > 0
self.numPorts = self.numPorts - 1
self.transport = None
if not self.numPorts:
if self.noisy:
log.msg("Stopping protocol %s" % self)
self.stopProtocol()
def startProtocol(self):
"""Called when a transport is connected to this protocol.
Will only be called once, even if multiple ports are connected.
"""
def stopProtocol(self):
"""Called when the transport is disconnected.
Will only be called once, after all ports are disconnected.
"""
def makeConnection(self, transport):
"""Make a connection to a transport and a server.
This sets the 'transport' attribute of this DatagramProtocol, and calls the
doStart() callback.
"""
        assert self.transport is None
self.transport = transport
self.doStart()
def datagramReceived(self, datagram, addr):
"""Called when a datagram is received.
@param datagram: the string received from the transport.
@param addr: tuple of source of datagram.
"""
class DatagramProtocol(AbstractDatagramProtocol):
"""Protocol for datagram-oriented transport, e.g. UDP."""
def connectionRefused(self):
"""Called due to error from write in connected mode.
Note this is a result of ICMP message generated by *previous*
write.
"""
class ConnectedDatagramProtocol(DatagramProtocol):
"""Protocol for connected datagram-oriented transport.
No longer necessary for UDP.
"""
def datagramReceived(self, datagram):
"""Called when a datagram is received.
@param datagram: the string received from the transport.
"""
def connectionFailed(self, failure):
"""Called if connecting failed.
Usually this will be due to a DNS lookup failure.
"""
class FileWrapper:
"""A wrapper around a file-like object to make it behave as a Transport.
This doesn't actually stream the file to the attached protocol,
and is thus useful mainly as a utility for debugging protocols.
"""
implements(interfaces.ITransport)
closed = 0
disconnecting = 0
producer = None
streamingProducer = 0
def __init__(self, file):
self.file = file
def write(self, data):
try:
self.file.write(data)
except:
self.handleException()
# self._checkProducer()
def _checkProducer(self):
# Cheating; this is called at "idle" times to allow producers to be
# found and dealt with
if self.producer:
self.producer.resumeProducing()
def registerProducer(self, producer, streaming):
"""From abstract.FileDescriptor
"""
self.producer = producer
self.streamingProducer = streaming
if not streaming:
producer.resumeProducing()
def unregisterProducer(self):
self.producer = None
def stopConsuming(self):
self.unregisterProducer()
self.loseConnection()
def writeSequence(self, iovec):
self.write("".join(iovec))
def loseConnection(self):
self.closed = 1
try:
self.file.close()
except (IOError, OSError):
self.handleException()
def getPeer(self):
# XXX: According to ITransport, this should return an IAddress!
return 'file', 'file'
def getHost(self):
# XXX: According to ITransport, this should return an IAddress!
return 'file'
def handleException(self):
pass
def resumeProducing(self):
        # Never sends data anyway
pass
def pauseProducing(self):
        # Never sends data anyway
pass
def stopProducing(self):
self.loseConnection()
__all__ = ["Factory", "ClientFactory", "ReconnectingClientFactory", "connectionDone",
"Protocol", "ProcessProtocol", "FileWrapper", "ServerFactory",
"AbstractDatagramProtocol", "DatagramProtocol", "ConnectedDatagramProtocol",
"ClientCreator"]
|
# -*- coding: utf-8 -*-
"""
AsciiDoc Reader
===============
This plugin allows you to use AsciiDoc to write your posts.
File extension should be ``.asc``, ``.adoc``, or ``.asciidoc``.
"""
from pelican.readers import BaseReader
from pelican.utils import pelican_open
from pelican import signals
import six
try:
# asciidocapi won't import on Py3
from .asciidocapi import AsciiDocAPI, AsciiDocError
# AsciiDocAPI class checks for asciidoc.py
AsciiDocAPI()
except:
asciidoc_enabled = False
else:
asciidoc_enabled = True
class AsciiDocReader(BaseReader):
"""Reader for AsciiDoc files"""
enabled = asciidoc_enabled
file_extensions = ['asc', 'adoc', 'asciidoc']
default_options = ["--no-header-footer", "-a newline=\\n"]
default_backend = 'html5'
def read(self, source_path):
"""Parse content and metadata of asciidoc files"""
from cStringIO import StringIO
with pelican_open(source_path) as source:
text = StringIO(source.encode('utf8'))
content = StringIO()
ad = AsciiDocAPI()
options = self.settings.get('ASCIIDOC_OPTIONS', [])
options = self.default_options + options
for o in options:
ad.options(*o.split())
backend = self.settings.get('ASCIIDOC_BACKEND', self.default_backend)
ad.execute(text, content, backend=backend)
content = content.getvalue().decode('utf8')
metadata = {}
for name, value in ad.asciidoc.document.attributes.items():
name = name.lower()
metadata[name] = self.process_metadata(name, six.text_type(value))
if 'doctitle' in metadata:
metadata['title'] = metadata['doctitle']
return content, metadata
def add_reader(readers):
for ext in AsciiDocReader.file_extensions:
readers.reader_classes[ext] = AsciiDocReader
def register():
signals.readers_init.connect(add_reader)
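# Hedged usage note (assumption: the plugin directory is named
# "asciidoc_reader"): once the plugin is on PLUGIN_PATHS, enabling it is just
# a matter of listing it in pelicanconf.py, e.g.
#
#   PLUGINS = ['asciidoc_reader']
#   ASCIIDOC_OPTIONS = ['-a icons']      # optional extra asciidoc flags
#   ASCIIDOC_BACKEND = 'html5'           # matches default_backend above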
|
# Generated by Django 2.2.4 on 2019-08-13 12:04
from django.db import migrations
import mdeditor.fields
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_post_clicks'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='excerpt',
),
migrations.AlterField(
model_name='post',
name='body',
field=mdeditor.fields.MDTextField(),
),
]
|
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from drfpasswordless.views import (ObtainEmailCallbackToken,
ObtainMobileCallbackToken,
ObtainAuthTokenFromCallbackToken,
VerifyAliasFromCallbackToken,
ObtainEmailVerificationCallbackToken,
ObtainMobileVerificationCallbackToken, )
urlpatterns = [url(r'^callback/auth/$', ObtainAuthTokenFromCallbackToken.as_view(), name='auth_callback'),
url(r'^auth/email/$', ObtainEmailCallbackToken.as_view(), name='auth_email'),
url(r'^auth/mobile/$', ObtainMobileCallbackToken.as_view(), name='auth_mobile'),
url(r'^callback/verify/$', VerifyAliasFromCallbackToken.as_view(), name='verify_callback'),
url(r'^verify/email/$', ObtainEmailVerificationCallbackToken.as_view(), name='verify_email'),
url(r'^verify/mobile/$', ObtainMobileVerificationCallbackToken.as_view(), name='verify_mobile')]
urlpatterns = format_suffix_patterns(urlpatterns)
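# Hedged usage note: these patterns are normally pulled into a project's root
# urls.py with include(); the "api/" prefix below is illustrative.
#
#   from django.conf.urls import include, url
#   urlpatterns = [url(r'^api/', include('drfpasswordless.urls'))]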
|
import distutils.sysconfig
import os
import platform
import re
import sys
def get_python_relative_libdir():
"""Returns the appropropriate python libdir relative to the build directory.
@param exe_path the path to the lldb executable
@return the python path that needs to be added to sys.path (PYTHONPATH)
in order to find the lldb python module.
"""
if platform.system() != 'Linux':
return None
# We currently have a bug in lldb -P that does not account for
# architecture variants in python paths for
# architecture-specific modules. Handle the lookup here.
# When that bug is fixed, we should just ask lldb for the
# right answer always.
arch_specific_libdir = distutils.sysconfig.get_python_lib(True, False)
split_libdir = arch_specific_libdir.split(os.sep)
lib_re = re.compile(r"^lib.+$")
for i in range(len(split_libdir)):
match = lib_re.match(split_libdir[i])
if match is not None:
# We'll call this the relative root of the lib dir.
# Things like RHEL will have an arch-specific python
# lib dir, which isn't 'lib' on x86_64.
return os.sep.join(split_libdir[i:])
# Didn't resolve it.
return None
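# Hedged worked example: on an x86_64 RHEL-style layout the function maps
#   /usr/lib64/python2.7/site-packages  ->  lib64/python2.7/site-packages
# i.e. it keeps everything from the first "lib*" path component onward.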
if __name__ == '__main__':
lib_dir = get_python_relative_libdir()
if lib_dir is not None:
sys.stdout.write(lib_dir)
sys.exit(0)
else:
sys.exit(1)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import allure
import coreapi
import pytest
from adcm_client.objects import ADCMClient
from adcm_pytest_plugin.utils import get_data_dir
from adcm_pytest_plugin import utils
from jsonschema import validate
# pylint: disable=E0401, W0601, W0611, W0621, W0212
from tests.library import errorcodes as err
from tests.library import steps
from tests.library.utils import get_random_service, get_random_host_prototype
SCHEMAS = os.path.join(os.path.dirname(__file__), "schemas/")
host_bad_configs = (({"str-key": "{1bbb}", "required": "158", "option": "my.host",
"sub": {"sub1": 3}, "credentials": {"sample_string": "test",
"read_only_initiated": 1}},
"should be integer"),
({"str-key": 61, "required": 158, "fkey": 18.3,
"option": "my.host", "sub": {"sub1": 3},
"credentials": {"sample_string": "txt",
"read_only_initiated": {}}},
'should be string'),
({"str-key": "{1bbb}", "required": 158, "fkey": 18.3,
"option": "my.host", "sub": {"sub1": 9}},
'not in option list'),
({"str-key": "{1bbb}", "required": 158, "option": 8080,
"sub": {"sub1": {"foo": "bar"}}},
'should be flat')
)
@pytest.fixture(scope="module")
def hostprovider(sdk_client_ms: ADCMClient):
bundle = sdk_client_ms.upload_from_fs(get_data_dir(__file__, 'hostprovider_bundle'))
return bundle.provider_create(utils.random_string())
@pytest.fixture(scope="module")
def host(sdk_client_ms: ADCMClient, hostprovider):
return hostprovider.host_create(utils.random_string())
@pytest.fixture(scope="module")
def cluster(sdk_client_ms: ADCMClient):
return sdk_client_ms.upload_from_fs(get_data_dir(__file__, 'cluster_bundle'))
@pytest.fixture(scope="module")
def client(sdk_client_ms: ADCMClient, cluster, hostprovider):
return sdk_client_ms.adcm()._api.objects
class TestHost:
"""
Basic tests for host
"""
def test_validate_host_prototype(self, client):
host_prototype = json.loads(json.dumps(client.stack.host.list()[0]))
schema = json.load(
open(SCHEMAS + '/stack_list_item_schema.json')
)
with allure.step('Match prototype with schema'):
assert validate(host_prototype, schema) is None
steps.delete_all_data(client)
def test_create_host(self, sdk_client_fs: ADCMClient):
"""Check that host have same fqdn and status after reread config
"""
bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'hostprovider_bundle'))
hp = bundle.provider_create(utils.random_string())
host = hp.host_create(utils.random_string())
host_status_before = host.status
host_fqdn_before = host.fqdn
with allure.step('Reread host'):
host.reread()
host_status_after = host.status
host_fqdn_after = host.fqdn
with allure.step('Check states and fqdn'):
assert host_fqdn_before == host_fqdn_after
assert host_status_before == host_status_after
def test_shouldnt_create_duplicate_host(self, sdk_client_fs: ADCMClient):
"""We have restriction for create duplicated hosts (wuth the same fqdn).
Scenario:
1. Create hostprovider
2. Create first host
3. Create second host with the same FQDN
        4. Check that we get a 409 error for the second host creation
"""
bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'hostprovider_simple'))
hp = bundle.provider_create(utils.random_string())
hp.host_create("duplicate")
with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
hp.host_create('duplicate')
with allure.step('Check host conflict'):
err.HOST_CONFLICT.equal(e, 'duplicate host')
def test_shouldnt_create_host_with_unknown_prototype(self, client):
with allure.step('Create provider'):
provider_id = client.provider.create(prototype_id=client.stack.provider.list()[0]['id'],
name=utils.random_string())['id']
with allure.step('Create host'):
with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
client.host.create(prototype_id=random.randint(100, 500),
provider_id=provider_id,
fqdn=utils.random_string())
with allure.step('Check PROTOTYPE_NOT_FOUND error'):
err.PROTOTYPE_NOT_FOUND.equal(e, 'prototype doesn\'t exist')
def test_shouldnt_create_host_wo_prototype(self, client):
with allure.step('Create provider'):
provider = client.provider.create(prototype_id=client.stack.provider.list()[0]['id'],
name=utils.random_string())
with allure.step('Try to create host without prototype'):
with pytest.raises(coreapi.exceptions.ParameterError) as e:
client.host.create(provider_id=provider['id'], fqdn=utils.random_string())
with allure.step('Check prototype_id error'):
assert str(e.value) == "{'prototype_id': 'This parameter is required.'}"
def test_shouldnt_create_host_wo_provider(self, client):
with allure.step('Create prototype'):
proto = get_random_host_prototype(client)
with pytest.raises(coreapi.exceptions.ParameterError) as e:
client.host.create(prototype_id=proto['id'], fqdn=utils.random_string())
with allure.step('Check provider_id error'):
assert str(e.value) == "{'provider_id': 'This parameter is required.'}"
def test_create_host_with_max_length_plus_1(self, sdk_client_fs: ADCMClient):
"""We cannot create host with name more then max length
"""
bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'hostprovider_simple'))
hp = bundle.provider_create(utils.random_string())
with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
hp.host_create(utils.random_string(257))
with allure.step('Check LONG_NAME error'):
err.LONG_NAME.equal(e, 'Host name is too long. Max length is 256')
def test_shouldnt_create_host_with_wrong_name(self, sdk_client_fs: ADCMClient):
"""Check that host name cannot contain special characters
"""
bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'hostprovider_simple'))
hp = bundle.provider_create(utils.random_string())
with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
hp.host_create(utils.random_string() + utils.random_special_chars())
with allure.step('Check WRONG_NAME error'):
err.WRONG_NAME.equal(e, 'Host name is incorrect. '
'Only latin characters, digits, dots (.)')
def test_get_host_list(self, sdk_client_fs: ADCMClient):
"""Create multiple hosts and check that all hosts was created
"""
expected_list = set()
actual_list = set()
bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'hostprovider_simple'))
hp = bundle.provider_create(utils.random_string())
for fqdn in utils.random_string_list():
hp.host_create(fqdn)
expected_list.add(fqdn)
for host in sdk_client_fs.host_list():
actual_list.add(host.fqdn)
with allure.step('Check created hosts with the data from the API'):
assert actual_list == expected_list
def test_get_host_info(self, client):
host = steps.create_host_w_default_provider(client, utils.random_string())
actual = steps.read_host(client, host['id'])
with allure.step('Check created host with the data from the API'):
del actual['status']
del host['status']
assert actual == host
def test_delete_host(self, sdk_client_fs: ADCMClient):
"""Check that we can delete host"""
bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'hostprovider_simple'))
hp = bundle.provider_create(utils.random_string())
host = hp.host_create("deletion_host")
with allure.step('delete host'):
deletion_result = host.delete()
with allure.step('Check that host is deleted'):
assert deletion_result is None
def test_should_return_correct_error_when_read_deleted(self, sdk_client_fs: ADCMClient):
"""Check that we have 409 error if host not found"""
bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'hostprovider_simple'))
hp = bundle.provider_create(utils.random_string())
host = hp.host_create(utils.random_string())
with allure.step('delete host'):
host.delete()
with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
host.reread()
with allure.step('Check HOST_NOT_FOUND'):
err.HOST_NOT_FOUND.equal(e)
def test_should_return_correct_error_when_delete_nonexist_host(
self, sdk_client_fs: ADCMClient):
"""If we try to delete deleted host we've got 409 error.
"""
bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'hostprovider_simple'))
hp = bundle.provider_create(utils.random_string())
host = hp.host_create(utils.random_string())
with allure.step('delete host'):
host.delete()
with allure.step('delete host second time'):
with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
host.delete()
with allure.step('Check HOST_NOT_FOUND'):
err.HOST_NOT_FOUND.equal(e, 'host doesn\'t exist')
# *** Basic tests for hostcomponent ***
def test_create_hostcomponent(self, sdk_client_fs: ADCMClient):
"""Check that hostcomponent id the same in component list and for service
"""
bundle = sdk_client_fs.upload_from_fs(get_data_dir(
__file__, 'cluster_service_hostcomponent'))
bundle_hp = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'hostprovider_simple'))
cluster = bundle.cluster_create(utils.random_string())
hp = bundle_hp.provider_create(utils.random_string())
host = hp.host_create(utils.random_string())
cluster.host_add(host)
service = cluster.service_add(name="ZOOKEEPER")
component_list = service.component_list()
component = service.component(name='ZOOKEEPER_CLIENT')
with allure.step('Check component id and name'):
assert component.component_id == component_list[0].component_id
assert component.name == component_list[0].name
    def test_get_hostcomponent_list(self, client):  # invalid case: a random component is chosen on each loop iteration
cluster = steps.create_cluster(client)
service = steps.read_service(client, get_random_service(client)['id'])
cluster_svc = client.cluster.service.create(cluster_id=cluster['id'],
prototype_id=service['id'])
components = client.cluster.service.component.list(cluster_id=cluster['id'],
service_id=cluster_svc['id'])
# create mapping between cluster and hosts, then create hostcomponent on host
hostcomponent_list = []
for fqdn in utils.random_string_list():
host = steps.create_host_w_default_provider(client, fqdn)
steps.add_host_to_cluster(client, host, cluster)
component = random.choice(components)['id']
hostcomponent_list.append({"host_id": host['id'], "service_id": cluster_svc['id'],
"component_id": component})
expected_hostcomponent_list = client.cluster.hostcomponent.create(
cluster_id=cluster['id'], hc=hostcomponent_list)
actual_hs_list = client.cluster.hostcomponent.list(cluster_id=cluster['id'])
with allure.step('Check created data with data from API'):
assert actual_hs_list == expected_hostcomponent_list
class TestHostConfig:
"""Class for test host configuration"""
def test_config_history_url_must_point_to_the_host_config(self, client):
host = steps.create_host_w_default_provider(client, utils.random_string())
config = {"str-key": "{1bbb}", "required": 158, "option": 8080, "sub": {"sub1": 2},
"credentials": {"sample_string": "txt", "read_only_initiated": {}}}
i = 0
with allure.step('Create host history'):
while i < random.randint(0, 10):
client.host.config.history.create(host_id=host['id'],
description=utils.random_string(),
config=config)
i += 1
history = client.host.config.history.list(host_id=host['id'])
with allure.step('Check host history'):
for conf in history:
assert ('host/{0}/config/'.format(host['id']) in conf['url']) is True
steps.delete_all_data(client)
def test_get_default_host_config(self, client):
# Get a default host config and validate it with json schema
host = steps.create_host_w_default_provider(client, utils.random_string())
config_json = {}
with allure.step('Get default configuration from host'):
config = client.host.config.current.list(host_id=host['id'])
if config:
config_json = json.loads(json.dumps(config))
schema = json.load(open(SCHEMAS + '/config_item_schema.json'))
with allure.step('Check config'):
assert validate(config_json, schema) is None
steps.delete_all_data(client)
def test_get_config_from_nonexistant_host(self, sdk_client_fs: ADCMClient):
"""Get configuration for non exist host.
"""
bundle_hp = sdk_client_fs.upload_from_fs(get_data_dir(
__file__, 'hostprovider_simple'))
hp = bundle_hp.provider_create(utils.random_string())
        with allure.step('Get host config from a nonexistent host'):
with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
hp.host(host_id=random.randint(100, 500))
with allure.step('Check error host doesn\'t exist'):
err.HOST_NOT_FOUND.equal(e, 'host doesn\'t exist')
def test_shouldnt_create_host_config_when_config_not_json_string(self, client):
"""Should not create host configuration when config string is not json
"""
host = steps.create_host_w_default_provider(client, utils.random_string())
config = utils.random_string()
with allure.step('Try to create the host config from non-json string'):
with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
client.host.config.history.create(host_id=host['id'], config=config)
with allure.step('Check error config should not be just one string'):
err.JSON_ERROR.equal(e, 'config should not be just one string')
def test_shouldnt_create_host_config_when_config_is_number(self, client):
"""Should not create host configuration when config string is number
"""
host = steps.create_host_w_default_provider(client, utils.random_string())
config = random.randint(100, 999)
with allure.step('Try to create the host configuration with a number'):
with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
client.host.config.history.create(host_id=host['id'], config=config)
with allure.step('Check error should not be just one int or float'):
err.JSON_ERROR.equal(e, 'should not be just one int or float')
@pytest.mark.parametrize(('config', 'error'), host_bad_configs)
def test_change_host_config_negative(self, host, config, error):
"""Check that we have error if try to update host config with bad configuration
:param host: host object
:param config: dict with bad config
:param error: expected error
"""
with allure.step('Try to create config when parameter is not integer'):
with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
host.config_set(config)
with allure.step(f'Check error {error}'):
err.CONFIG_VALUE_ERROR.equal(e, error)
def test_should_create_host_config_when_parameter_is_integer_and_not_float(
self, sdk_client_fs: ADCMClient):
"""Create host config for float parameter with integer
"""
bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, 'hostprovider_bundle'))
hp = bundle.provider_create(utils.random_string())
host = hp.host_create(utils.random_string())
config = {"str-key": "{1bbb}", "required": 158, "fkey": 18, "option": "my.host",
"sub": {"sub1": 3},
"credentials": {"sample_string": "txt", "read_only_initiated": {}}}
host.config_set(config)
|
class Engine:
PANDAS = "pandas"
POSTGRES = "postgres"
PRESTO = "Presto"
SPARK = "Spark"
SQL_SERVER = "SqlServer"
known_engines = {PANDAS, POSTGRES, PRESTO, SPARK, SQL_SERVER}
|
#!/usr/bin/env python
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/imageworks/OpenShadingLanguage
# This shader would ordinarily issue a warning.
# With -Werror, it should be upgraded to an error.
failureok = 1 # this test is expected to have oslc errors
oslcargs = "-Werror"
# No need, the shaders in this dir are always compiled
#command = oslc("test.osl")
|
from multiprocessing import freeze_support
from pathlib import Path
from typing import Dict
from deafwave.full_node.full_node import FullNode
from deafwave.rpc.full_node_rpc_api import FullNodeRpcApi
from deafwave.server.outbound_message import NodeType
from deafwave.server.start_service import run_service
from deafwave.util.block_tools import BlockTools, test_constants
from deafwave.util.config import load_config_cli
from deafwave.util.default_root import DEFAULT_ROOT_PATH
from deafwave.util.path import mkdir, path_from_root
from .full_node_simulator import FullNodeSimulator
# See: https://bugs.python.org/issue29288
"".encode("idna")
SERVICE_NAME = "full_node"
def service_kwargs_for_full_node_simulator(root_path: Path, config: Dict, bt: BlockTools) -> Dict:
mkdir(path_from_root(root_path, config["database_path"]).parent)
constants = bt.constants
node = FullNode(
config,
root_path=root_path,
consensus_constants=constants,
name=SERVICE_NAME,
)
peer_api = FullNodeSimulator(node, bt)
network_id = config["selected_network"]
kwargs = dict(
root_path=root_path,
node=node,
peer_api=peer_api,
node_type=NodeType.FULL_NODE,
advertised_port=config["port"],
service_name=SERVICE_NAME,
server_listen_ports=[config["port"]],
on_connect_callback=node.on_connect,
rpc_info=(FullNodeRpcApi, config["rpc_port"]),
network_id=network_id,
)
return kwargs
def main() -> None:
config = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
config["database_path"] = config["simulator_database_path"]
config["peer_db_path"] = config["simulator_peer_db_path"]
config["introducer_peer"]["host"] = "127.0.0.1"
config["introducer_peer"]["port"] = 58735
config["selected_network"] = "testnet0"
config["simulation"] = True
kwargs = service_kwargs_for_full_node_simulator(
DEFAULT_ROOT_PATH,
config,
BlockTools(test_constants),
)
return run_service(**kwargs)
if __name__ == "__main__":
freeze_support()
main()
|
"""
Computes putative binding pockets on protein.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2017, Stanford University"
__license__ = "MIT"
import os
import tempfile
import numpy as np
from subprocess import call
from scipy.spatial import ConvexHull
from deepchem.feat.binding_pocket_features import BindingPocketFeaturizer
from deepchem.feat.fingerprints import CircularFingerprint
from deepchem.models.sklearn_models import SklearnModel
from deepchem.utils import rdkit_util
def extract_active_site(protein_file, ligand_file, cutoff=4):
"""Extracts a box for the active site."""
protein_coords = rdkit_util.load_molecule(
protein_file, add_hydrogens=False)[0]
ligand_coords = rdkit_util.load_molecule(
ligand_file, add_hydrogens=True, calc_charges=True)[0]
num_ligand_atoms = len(ligand_coords)
num_protein_atoms = len(protein_coords)
pocket_inds = []
pocket_atoms = set([])
for lig_atom_ind in range(num_ligand_atoms):
lig_atom = ligand_coords[lig_atom_ind]
for protein_atom_ind in range(num_protein_atoms):
protein_atom = protein_coords[protein_atom_ind]
if np.linalg.norm(lig_atom - protein_atom) < cutoff:
if protein_atom_ind not in pocket_atoms:
pocket_atoms = pocket_atoms.union(set([protein_atom_ind]))
# Should be an array of size (n_pocket_atoms, 3)
pocket_atoms = list(pocket_atoms)
n_pocket_atoms = len(pocket_atoms)
pocket_coords = np.zeros((n_pocket_atoms, 3))
for ind, pocket_ind in enumerate(pocket_atoms):
pocket_coords[ind] = protein_coords[pocket_ind]
x_min = int(np.floor(np.amin(pocket_coords[:, 0])))
x_max = int(np.ceil(np.amax(pocket_coords[:, 0])))
y_min = int(np.floor(np.amin(pocket_coords[:, 1])))
y_max = int(np.ceil(np.amax(pocket_coords[:, 1])))
z_min = int(np.floor(np.amin(pocket_coords[:, 2])))
z_max = int(np.ceil(np.amax(pocket_coords[:, 2])))
return (((x_min, x_max), (y_min, y_max), (z_min, z_max)), pocket_atoms,
pocket_coords)
def compute_overlap(mapping, box1, box2):
"""Computes overlap between the two boxes.
Overlap is defined as % atoms of box1 in box2. Note that
overlap is not a symmetric measurement.
"""
atom1 = set(mapping[box1])
atom2 = set(mapping[box2])
return len(atom1.intersection(atom2)) / float(len(atom1))
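# Hedged worked example: if mapping[box1] == [1, 2, 3, 4] and
# mapping[box2] == [3, 4, 5], then compute_overlap(mapping, box1, box2)
# returns 2 / 4 = 0.5, while the reverse direction returns 2 / 3 -- which is
# why the docstring notes the measure is not symmetric.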
def get_all_boxes(coords, pad=5):
"""Get all pocket boxes for protein coords.
We pad all boxes the prescribed number of angstroms.
TODO(rbharath): It looks like this may perhaps be non-deterministic?
"""
hull = ConvexHull(coords)
boxes = []
for triangle in hull.simplices:
# coords[triangle, 0] gives the x-dimension of all triangle points
# Take transpose to make sure rows correspond to atoms.
points = np.array(
[coords[triangle, 0], coords[triangle, 1], coords[triangle, 2]]).T
# We voxelize so all grids have integral coordinates (convenience)
x_min, x_max = np.amin(points[:, 0]), np.amax(points[:, 0])
x_min, x_max = int(np.floor(x_min)) - pad, int(np.ceil(x_max)) + pad
y_min, y_max = np.amin(points[:, 1]), np.amax(points[:, 1])
y_min, y_max = int(np.floor(y_min)) - pad, int(np.ceil(y_max)) + pad
z_min, z_max = np.amin(points[:, 2]), np.amax(points[:, 2])
z_min, z_max = int(np.floor(z_min)) - pad, int(np.ceil(z_max)) + pad
boxes.append(((x_min, x_max), (y_min, y_max), (z_min, z_max)))
return boxes
def boxes_to_atoms(atom_coords, boxes):
"""Maps each box to a list of atoms in that box.
TODO(rbharath): This does a num_atoms x num_boxes computations. Is
there a reasonable heuristic we can use to speed this up?
"""
mapping = {}
for box_ind, box in enumerate(boxes):
box_atoms = []
(x_min, x_max), (y_min, y_max), (z_min, z_max) = box
print("Handing box %d/%d" % (box_ind, len(boxes)))
for atom_ind in range(len(atom_coords)):
atom = atom_coords[atom_ind]
x_cont = x_min <= atom[0] and atom[0] <= x_max
y_cont = y_min <= atom[1] and atom[1] <= y_max
z_cont = z_min <= atom[2] and atom[2] <= z_max
if x_cont and y_cont and z_cont:
box_atoms.append(atom_ind)
mapping[box] = box_atoms
return mapping
def merge_boxes(box1, box2):
"""Merges two boxes."""
(x_min1, x_max1), (y_min1, y_max1), (z_min1, z_max1) = box1
(x_min2, x_max2), (y_min2, y_max2), (z_min2, z_max2) = box2
x_min = min(x_min1, x_min2)
y_min = min(y_min1, y_min2)
z_min = min(z_min1, z_min2)
x_max = max(x_max1, x_max2)
y_max = max(y_max1, y_max2)
z_max = max(z_max1, z_max2)
return ((x_min, x_max), (y_min, y_max), (z_min, z_max))
def merge_overlapping_boxes(mapping, boxes, threshold=.8):
"""Merge boxes which have an overlap greater than threshold.
TODO(rbharath): This merge code is terribly inelegant. It's also quadratic
in number of boxes. It feels like there ought to be an elegant divide and
conquer approach here. Figure out later...
"""
num_boxes = len(boxes)
outputs = []
for i in range(num_boxes):
box = boxes[0]
new_boxes = []
new_mapping = {}
# If overlap of box with previously generated output boxes, return
contained = False
for output_box in outputs:
# Carry forward mappings
new_mapping[output_box] = mapping[output_box]
if compute_overlap(mapping, box, output_box) == 1:
contained = True
if contained:
continue
# We know that box has at least one atom not in outputs
unique_box = True
for merge_box in boxes[1:]:
overlap = compute_overlap(mapping, box, merge_box)
if overlap < threshold:
new_boxes.append(merge_box)
new_mapping[merge_box] = mapping[merge_box]
else:
# Current box has been merged into box further down list.
# No need to output current box
unique_box = False
merged = merge_boxes(box, merge_box)
new_boxes.append(merged)
new_mapping[merged] = list(
set(mapping[box]).union(set(mapping[merge_box])))
if unique_box:
outputs.append(box)
new_mapping[box] = mapping[box]
boxes = new_boxes
mapping = new_mapping
return outputs, mapping
class BindingPocketFinder(object):
"""Abstract superclass for binding pocket detectors"""
def find_pockets(self, protein_file, ligand_file):
"""Finds potential binding pockets in proteins."""
raise NotImplementedError
class ConvexHullPocketFinder(BindingPocketFinder):
"""Implementation that uses convex hull of protein to find pockets.
Based on https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4112621/pdf/1472-6807-14-18.pdf
"""
def __init__(self, pad=5):
self.pad = pad
def find_all_pockets(self, protein_file):
"""Find list of binding pockets on protein."""
# protein_coords is (N, 3) tensor
coords = rdkit_util.load_molecule(protein_file)[0]
return get_all_boxes(coords, self.pad)
def find_pockets(self, protein_file, ligand_file):
"""Find list of suitable binding pockets on protein."""
protein_coords = rdkit_util.load_molecule(
protein_file, add_hydrogens=False, calc_charges=False)[0]
ligand_coords = rdkit_util.load_molecule(
ligand_file, add_hydrogens=False, calc_charges=False)[0]
boxes = get_all_boxes(protein_coords, self.pad)
mapping = boxes_to_atoms(protein_coords, boxes)
pockets, pocket_atoms_map = merge_overlapping_boxes(mapping, boxes)
pocket_coords = []
for pocket in pockets:
atoms = pocket_atoms_map[pocket]
coords = np.zeros((len(atoms), 3))
for ind, atom in enumerate(atoms):
coords[ind] = protein_coords[atom]
pocket_coords.append(coords)
return pockets, pocket_atoms_map, pocket_coords
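# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module); the file paths
# below are placeholders for a real protein PDB and ligand SDF on disk:
#
#   finder = ConvexHullPocketFinder(pad=5)
#   pockets, pocket_atoms_map, pocket_coords = finder.find_pockets(
#       "protein.pdb", "ligand.sdf")
#   # `pockets` is a list of ((x_min, x_max), (y_min, y_max), (z_min, z_max))
#   # boxes; `pocket_coords[i]` holds the coordinates of the atoms in box i.
# ---------------------------------------------------------------------------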
class RFConvexHullPocketFinder(BindingPocketFinder):
"""Uses pre-trained RF model + ConvexHulPocketFinder to select pockets."""
def __init__(self, pad=5):
self.pad = pad
self.convex_finder = ConvexHullPocketFinder(pad)
# Load binding pocket model
self.base_dir = tempfile.mkdtemp()
print("About to download trained model.")
# TODO(rbharath): Shift refined to full once trained.
call((
"wget -nv -c http://deepchem.io.s3-website-us-west-1.amazonaws.com/trained_models/pocket_random_refined_RF.tar.gz"
).split())
call(("tar -zxvf pocket_random_refined_RF.tar.gz").split())
call(("mv pocket_random_refined_RF %s" % (self.base_dir)).split())
self.model_dir = os.path.join(self.base_dir, "pocket_random_refined_RF")
# Fit model on dataset
self.model = SklearnModel(model_dir=self.model_dir)
self.model.reload()
# Create featurizers
self.pocket_featurizer = BindingPocketFeaturizer()
self.ligand_featurizer = CircularFingerprint(size=1024)
def find_pockets(self, protein_file, ligand_file):
"""Compute features for a given complex
    TODO(rbharath): This has a lot of code overlap with
compute_binding_pocket_features in
examples/binding_pockets/binding_pocket_datasets.py. Find way to refactor
to avoid code duplication.
"""
# if not ligand_file.endswith(".sdf"):
# raise ValueError("Only .sdf ligand files can be featurized.")
# ligand_basename = os.path.basename(ligand_file).split(".")[0]
# ligand_mol2 = os.path.join(
# self.base_dir, ligand_basename + ".mol2")
#
# # Write mol2 file for ligand
# obConversion = ob.OBConversion()
# conv_out = obConversion.SetInAndOutFormats(str("sdf"), str("mol2"))
# ob_mol = ob.OBMol()
# obConversion.ReadFile(ob_mol, str(ligand_file))
# obConversion.WriteFile(ob_mol, str(ligand_mol2))
#
# # Featurize ligand
# mol = Chem.MolFromMol2File(str(ligand_mol2), removeHs=False)
# if mol is None:
# return None, None
# # Default for CircularFingerprint
# n_ligand_features = 1024
# ligand_features = self.ligand_featurizer.featurize([mol])
#
# # Featurize pocket
# pockets, pocket_atoms_map, pocket_coords = self.convex_finder.find_pockets(
# protein_file, ligand_file)
# n_pockets = len(pockets)
# n_pocket_features = BindingPocketFeaturizer.n_features
#
# features = np.zeros((n_pockets, n_pocket_features+n_ligand_features))
# pocket_features = self.pocket_featurizer.featurize(
# protein_file, pockets, pocket_atoms_map, pocket_coords)
# # Note broadcast operation
# features[:, :n_pocket_features] = pocket_features
# features[:, n_pocket_features:] = ligand_features
# dataset = NumpyDataset(X=features)
# pocket_preds = self.model.predict(dataset)
# pocket_pred_proba = np.squeeze(self.model.predict_proba(dataset))
#
# # Find pockets which are active
# active_pockets = []
# active_pocket_atoms_map = {}
# active_pocket_coords = []
# for pocket_ind in range(len(pockets)):
# #################################################### DEBUG
# # TODO(rbharath): For now, using a weak cutoff. Fix later.
# #if pocket_preds[pocket_ind] == 1:
# if pocket_pred_proba[pocket_ind][1] > .15:
# #################################################### DEBUG
# pocket = pockets[pocket_ind]
# active_pockets.append(pocket)
# active_pocket_atoms_map[pocket] = pocket_atoms_map[pocket]
# active_pocket_coords.append(pocket_coords[pocket_ind])
# return active_pockets, active_pocket_atoms_map, active_pocket_coords
# # TODO(LESWING)
raise ValueError("Karl Implement")
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpointable object SavedModel loading."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import save
from tensorflow.python.training.checkpointable import tracking
class LoadTest(test.TestCase):
def cycle(self, obj):
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(obj, path, signatures={})
return load.load(path)
def test_structure_import(self):
root = tracking.Checkpointable()
root.dep_one = tracking.Checkpointable()
root.dep_two = tracking.Checkpointable()
root.dep_two.dep = tracking.Checkpointable()
root.dep_three = root.dep_two.dep
imported = self.cycle(root)
self.assertIs(imported.dep_three, imported.dep_two.dep)
self.assertIsNot(imported.dep_one, imported.dep_two)
def test_variables(self):
root = tracking.Checkpointable()
root.v1 = variables.Variable(1., trainable=True)
root.v2 = variables.Variable(2., trainable=False)
imported = self.cycle(root)
    self.assertEqual(imported.v1.numpy(), 1.0)
self.assertTrue(imported.v1.trainable)
    self.assertEqual(imported.v2.numpy(), 2.0)
self.assertFalse(imported.v2.trainable)
def test_capture_variables(self):
root = tracking.Checkpointable()
root.weights = variables.Variable(2.)
root.f = def_function.function(
lambda x: root.weights * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
imported = self.cycle(root)
self.assertEqual(4., imported.f(constant_op.constant(2.)).numpy())
imported.weights.assign(4.0)
self.assertEqual(8., imported.f(constant_op.constant(2.)).numpy())
def _make_asset(self, contents):
filename = tempfile.mktemp(prefix=self.get_temp_dir())
with open(filename, "w") as f:
f.write(contents)
return filename
def test_assets(self):
file1 = self._make_asset("contents 1")
file2 = self._make_asset("contents 2")
root = tracking.Checkpointable()
root.asset1 = tracking.TrackableAsset(file1)
root.asset2 = tracking.TrackableAsset(file2)
save_dir = os.path.join(self.get_temp_dir(), "save_dir")
save.save(root, save_dir, signatures={})
file_io.delete_file(file1)
file_io.delete_file(file2)
load_dir = os.path.join(self.get_temp_dir(), "load_dir")
file_io.rename(save_dir, load_dir)
imported = load.load(load_dir)
with open(imported.asset1.asset_path.numpy(), "r") as f:
self.assertEquals("contents 1", f.read())
with open(imported.asset2.asset_path.numpy(), "r") as f:
self.assertEquals("contents 2", f.read())
def test_capture_assets(self):
root = tracking.Checkpointable()
root.vocab = tracking.TrackableAsset(self._make_asset("contents"))
root.f = def_function.function(
lambda: root.vocab.asset_path,
input_signature=[])
imported = self.cycle(root)
origin_output = root.f().numpy()
imported_output = imported.f().numpy()
self.assertNotEqual(origin_output, imported_output)
with open(imported_output, "r") as f:
self.assertEquals("contents", f.read())
def test_dedup_assets(self):
vocab = self._make_asset("contents")
root = tracking.Checkpointable()
root.asset1 = tracking.TrackableAsset(vocab)
root.asset2 = tracking.TrackableAsset(vocab)
imported = self.cycle(root)
self.assertEqual(imported.asset1.asset_path.numpy(),
imported.asset2.asset_path.numpy())
def test_implicit_input_signature(self):
@def_function.function
def func(x):
return 2 * x
root = tracking.Checkpointable()
root.f = func
# Add two traces.
root.f(constant_op.constant(1.))
root.f(constant_op.constant(1))
imported = self.cycle(root)
self.assertEqual(4., imported.f(constant_op.constant(2.)).numpy())
self.assertEqual(14, imported.f(constant_op.constant(7)).numpy())
def test_explicit_input_signature(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def func(x):
return 2 * x
root = tracking.Checkpointable()
root.f = func
imported = self.cycle(root)
self.assertEqual(4., imported.f(constant_op.constant(2.0)).numpy())
def test_nested_functions(self):
f = def_function.function(
lambda x: x*2.0,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
g = def_function.function(
lambda x: f(x) + 1.0,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root = tracking.Checkpointable()
root.g = g
imported = self.cycle(root)
imported.g(constant_op.constant([1.0]))
def test_function_with_default_bool_input(self):
def func(x, training=False):
if training:
return 2 * x
else:
return 7
root = tracking.Checkpointable()
root.f = def_function.function(func)
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(7, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
imported = self.cycle(root)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())
def test_positional_arguments(self):
def func(x, training=False, abc=7.1, defg=7.7):
del abc
if training:
return 2 * x
if defg == 7:
return 6
else:
return 7
root = tracking.Checkpointable()
root.f = def_function.function(func)
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(7, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
self.assertEqual(6, root.f(constant_op.constant(1), defg=7.0).numpy())
imported = self.cycle(root)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())
self.assertEqual(6, imported.f(constant_op.constant(1), defg=7.0).numpy())
def test_member_function(self):
class CheckpointableWithMember(tracking.Checkpointable):
def __init__(self):
super(CheckpointableWithMember, self).__init__()
self._some_value = 20
@def_function.function
def f(self, x, training=False):
if training:
return 2 * x
else:
return 7 + self._some_value
root = CheckpointableWithMember()
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(27, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
imported = self.cycle(root)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(27, imported.f(constant_op.constant(2)).numpy())
def test_side_effect_listing(self):
class M(tracking.Checkpointable):
def __init__(self):
super(M, self).__init__()
self.var = None
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def f(self, x):
if self.var is None:
self.var = variables.Variable(2.)
return x * self.var
m = M()
self.cycle(m)
    self.assertEqual(4.0, m.f(constant_op.constant(2.0)).numpy())
def test_basic_backprop(self):
weight = variables.Variable(1., trainable=True)
bias = variables.Variable(0., trainable=True)
g = def_function.function(
lambda x: x*weight + bias,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
root = tracking.Checkpointable()
root.weight = weight
root.bias = bias
root.g = g
imported = self.cycle(root)
with backprop.GradientTape(watch_accessed_variables=True) as t:
x = constant_op.constant([3.5])
loss = imported.g(x)
grad = t.gradient(loss, [imported.weight, imported.bias])
self.assertAllClose(grad, [3.5, 1.0])
def test_callable(self):
class M1(tracking.Checkpointable):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def __call__(self, x):
return x
root = tracking.Checkpointable()
root.m1 = M1()
root.m2 = tracking.Checkpointable()
root.m2.__call__ = def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])(
lambda x: x*3.0)
imported = self.cycle(root)
x = constant_op.constant(1.0)
self.assertTrue(callable(imported.m1))
self.assertAllEqual(root.m1(x), imported.m1(x))
# Note: `root.m2` was not callable since `__call__` attribute was set
# into the instance and not on the class. But after a serialization cycle
# that starts to work.
self.assertTrue(callable(imported.m2))
self.assertAllEqual(root.m2.__call__(x), imported.m2(x))
# Verify that user objects without `__call__` attribute are not callable.
self.assertFalse(callable(imported))
def test_chain_callable(self):
func = def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])(
lambda x: x*3.0)
root = tracking.Checkpointable()
root.__call__ = tracking.Checkpointable()
root.__call__.__call__ = tracking.Checkpointable()
root.__call__.__call__.__call__ = func
imported = self.cycle(root)
self.assertTrue(callable(imported))
x = constant_op.constant(1.0)
self.assertAllEqual(imported(x).numpy(), 3.0)
if __name__ == "__main__":
test.main()
|
from selenium import webdriver
import time
from bs4 import BeautifulSoup
import requests
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
driver = webdriver.PhantomJS()
driver.set_window_size(1120,550)
driver.get("http:10.10.2.1")
#time.sleep(5)
res=0
try:
e1=WebDriverWait(driver,10).until(
EC.presence_of_element_located((By.NAME,"username"))
)
e2=WebDriverWait(driver,10).until(
EC.presence_of_element_located((By.NAME,"passwd"))
)
e3=WebDriverWait(driver,10).until(
EC.presence_of_element_located((By.LINK_TEXT,"Login"))
)
driver.find_element_by_name('username').send_keys('Your_username')
driver.find_element_by_name('passwd').send_keys('Your_password')
driver.find_element_by_name('rememberme').click()
#res=e1&e2&e3
    res=BeautifulSoup(driver.page_source, "html.parser")
    if "Connected(Default Internet)" not in res.text:
driver.find_element_by_css_selector('.field2 input').click()
#print(res)
res=1
#time.sleep(5)
#a=driver.find_element_by_css_selector('.field2'(1))
#print(a)
#print(driver.current_url)
finally:
if res :
print("Successful!!")
time.sleep(5)
else:
print("Failed :(")
driver.quit()
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import unicodedata
import six
import tensorflow as tf
def validate_case_matches_checkpoint(do_lower_case, init_checkpoint):
"""Checks whether the casing config is consistent with the checkpoint name."""
# The casing has to be passed in by the user and there is no explicit check
# as to whether it matches the checkpoint. The casing information probably
# should have been stored in the bert_config.json file, but it's not, so
# we have to heuristically detect it to validate.
if not init_checkpoint:
return
m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint)
if m is None:
return
model_name = m.group(1)
lower_models = [
"uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12",
"multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12"
]
cased_models = [
"cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16",
"multi_cased_L-12_H-768_A-12"
]
is_bad_config = False
if model_name in lower_models and not do_lower_case:
is_bad_config = True
actual_flag = "False"
case_name = "lowercased"
opposite_flag = "True"
if model_name in cased_models and do_lower_case:
is_bad_config = True
actual_flag = "True"
case_name = "cased"
opposite_flag = "False"
if is_bad_config:
raise ValueError(
"You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. "
"However, `%s` seems to be a %s model, so you "
"should pass in `--do_lower_case=%s` so that the fine-tuning matches "
"how the model was pre-training. If this error is wrong, please "
"just comment out this check." % (actual_flag, init_checkpoint,
model_name, case_name, opposite_flag))
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with tf.gfile.GFile(vocab_file, "r") as reader:
while True:
token = convert_to_unicode(reader.readline())
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def convert_by_vocab(vocab, items):
"""Converts a sequence of [tokens|ids] using the vocab."""
output = []
#print("items:",items) #['[CLS]', '日', '##期', ',', '但', '被', '##告', '金', '##东', '##福', '载', '##明', '[MASK]', 'U', '##N', '##K', ']', '保', '##证', '本', '##月', '1', '##4', '[MASK]', '到', '##位', ',', '2', '##0', '##1', '##5', '年', '6', '[MASK]', '1', '##1', '日', '[', 'U', '##N', '##K', ']', ',', '原', '##告', '[MASK]', '认', '##可', '于', '2', '##0', '##1', '##5', '[MASK]', '6', '月', '[MASK]', '[MASK]', '日', '##向', '被', '##告', '主', '##张', '权', '##利', '。', '而', '[MASK]', '[MASK]', '自', '[MASK]', '[MASK]', '[MASK]', '[MASK]', '年', '6', '月', '1', '##1', '日', '[SEP]', '原', '##告', '于', '2', '##0', '##1', '##6', '[MASK]', '6', '[MASK]', '2', '##4', '日', '起', '##诉', ',', '主', '##张', '保', '##证', '责', '##任', ',', '已', '超', '##过', '保', '##证', '期', '##限', '[MASK]', '保', '##证', '人', '依', '##法', '不', '##再', '承', '##担', '保', '##证', '[MASK]', '[MASK]', '[MASK]', '[SEP]']
for i,item in enumerate(items):
#print(i,"item:",item) # ##期
output.append(vocab[item])
return output
def convert_tokens_to_ids(vocab, tokens):
return convert_by_vocab(vocab, tokens)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class FullTokenizer(object):
"""Runs end-to-end tokenziation."""
def __init__(self, vocab_file, do_lower_case=True):
self.vocab = load_vocab(vocab_file)
self.inv_vocab = {v: k for k, v in self.vocab.items()}
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab(self.vocab, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self, do_lower_case=True):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = convert_to_unicode(text)
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
    # words in the English Wikipedia).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenziation."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
        already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
text = convert_to_unicode(text)
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
  # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat in ("Cc", "Cf"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
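# A minimal, hypothetical end-to-end sketch (not part of the original module):
# writes a tiny vocabulary to a temporary file and runs FullTokenizer over it.
# The vocabulary contents and the sample sentence are illustrative assumptions.
def _example_full_tokenizer():
  import tempfile
  vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "un", "##aff", "##able", "day"]
  with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as f:
    f.write("\n".join(vocab_tokens) + "\n")
    vocab_file = f.name
  tokenizer = FullTokenizer(vocab_file=vocab_file, do_lower_case=True)
  pieces = tokenizer.tokenize("unaffable day")
  # Greedy longest-match-first wordpieces: ["un", "##aff", "##able", "day"]
  ids = tokenizer.convert_tokens_to_ids(pieces)
  return pieces, ids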
|
from __future__ import annotations
from decimal import Decimal
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
"""
A matrix multiplied with its inverse gives the identity matrix.
This function finds the inverse of a 2x2 matrix.
If the determinant of a matrix is 0, its inverse does not exist.
Sources for fixing inaccurate float arithmetic:
https://stackoverflow.com/questions/6563058/how-do-i-use-accurate-float-arithmetic-in-python
https://docs.python.org/3/library/decimal.html
>>> inverse_of_matrix([[2, 5], [2, 0]])
[[0.0, 0.5], [0.2, -0.2]]
>>> inverse_of_matrix([[2.5, 5], [1, 2]])
Traceback (most recent call last):
...
ValueError: This matrix has no inverse.
>>> inverse_of_matrix([[12, -16], [-9, 0]])
[[0.0, -0.1111111111111111], [-0.0625, -0.08333333333333333]]
>>> inverse_of_matrix([[12, 3], [16, 8]])
[[0.16666666666666666, -0.0625], [-0.3333333333333333, 0.25]]
>>> inverse_of_matrix([[10, 5], [3, 2.5]])
[[0.25, -0.5], [-0.3, 1.0]]
"""
    D = Decimal  # An abbreviation for conciseness
# Calculate the determinant of the matrix
determinant = D(matrix[0][0]) * D(matrix[1][1]) - D(matrix[1][0]) * D(matrix[0][1])
if determinant == 0:
raise ValueError("This matrix has no inverse.")
# Creates a copy of the matrix with swapped positions of the elements
swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [[float(D(n) / determinant) or 0.0 for n in row] for row in swapped_matrix]
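# Not part of the original file: a minimal hook to exercise the doctests in
# the docstring above when this module is run directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()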
|
# Copyright (c) Ye Liu. All rights reserved.
import nncore
def test_bind_getter():
@nncore.bind_getter('name', 'depth')
class Backbone:
_name = 'ResNet'
_depth = 50
backbone = Backbone()
assert backbone.name == 'ResNet'
assert backbone.depth == 50
|
# Generated by Django 2.1.4 on 2019-03-24 19:19
import datetime
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('name', models.CharField(blank=True, max_length=16, null=True, verbose_name='用户名')),
('gender', models.CharField(choices=[('male', '男'), ('female', '女')], default='female', max_length=6, verbose_name='性别')),
('mobile', models.CharField(blank=True, max_length=11, null=True, verbose_name='电话')),
('email', models.CharField(blank=True, max_length=100, null=True, verbose_name='邮箱')),
('top_img', models.ImageField(max_length=200, null=True, upload_to='user/')),
('create_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='创建时间')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': '用户',
'verbose_name_plural': '用户',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
import os
import click
from yoda.ssh.shell import Shell
from yoda.ssh.config import importHost
# Context obj
class Cmd():
def __init__(self):
self.verbose = False
self.shell = None
self.host = None
pass_cmd = click.make_pass_decorator(Cmd, ensure=True)
class CmdsLoader(click.MultiCommand):
_cmdFolder = os.path.abspath(os.path.join(os.path.dirname(__file__), 'cmds'))
def list_commands(self, ctx):
rv = []
for filename in os.listdir(self._cmdFolder):
if filename.endswith('.py'):
rv.append(filename[:-3])
rv.sort()
return rv
def get_command(self, ctx, name):
try:
cmdFullName = 'yoda.cmds.' + name
mod = __import__(cmdFullName, None, None, ['cmd'])
except ImportError:
return
return mod.cmd
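# Hypothetical example of a command module this loader would discover (e.g. a
# file cmds/hello.py; the name and behavior are assumptions, not part of this
# snippet). It only needs to expose a click command named `cmd`, which can
# receive the Cmd context set up in `yoda` below via the context object:
#
#   import click
#
#   @click.command()
#   @click.pass_obj
#   def cmd(cmd_ctx):
#       """Say hello from the configured host."""
#       click.echo("hello from %s" % cmd_ctx.host)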
@click.command(cls=CmdsLoader)
@click.option('-v', '--verbose', count=True, help="Explain what is being done")
@click.option('-i', '--interactive', count=True, help="Show all the output from the established remote shell session")
@click.option('-f', '--force', is_flag=True, help="Force the execution of the commands if one fails")
@click.option('-h', '--host', default="myserver", help="The name of the connection defined in ~/.ssh/config file")
@click.pass_context
def yoda(ctx, verbose, interactive, force, host):
shell = Shell(host)
hostConfig = importHost(host)
shell.setConfig(hostConfig[0]['options'])
shell.connect()
if (verbose):
click.echo("Connected to host %s" % host)
# Setting up cmd context
shell.interactive = bool(interactive)
shell.force = force
cmd = Cmd()
cmd.shell = shell
cmd.host = host
cmd.verbose = verbose
ctx.obj = cmd
# TODO
# vim /etc/ssh/sshd_config
# edit Port 17000
# edit PermitRootLogin without-password
# service ssh reload
|
"""This module supports puzzles that place fixed shape regions into the grid."""
from collections import defaultdict
import sys
from typing import Dict, List
from z3 import ArithRef, Int, IntVal, Or, Solver, PbEq
from .fastz3 import fast_and, fast_eq, fast_ne
from .geometry import Lattice, Point, Vector
from .quadtree import ExpressionQuadTree
# Key types for use with the ExpressionQuadTree when adding shape instance
# constraints.
HAS_INSTANCE_ID, NOT_HAS_INSTANCE_ID, HAS_SHAPE_TYPE = range(3)
def canonicalize_shape(shape: List[Vector]) -> List[Vector]:
"""Returns a new shape that's canonicalized.
A canonicalized shape is in sorted order and its first offset is Vector(0, 0).
This helps with deduplication, since equivalent shapes will be canonicalized
identically.
# Arguments
shape (List[Vector]): A list of offsets defining a shape.
# Returns
(List[Vector]): A list of offsets defining the canonicalized version
of the shape, i.e., in sorted order and with first offset equal
to Vector(0, 0).
"""
shape = sorted(shape)
first_negated = shape[0].negate()
return [v.translate(first_negated) for v in shape]
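# Hypothetical example (assuming the Vector(dy, dx) ordering used by the
# geometry module): an L-shaped tromino defined away from the origin
# canonicalizes to a sorted list whose first offset is Vector(0, 0):
#
#   canonicalize_shape([Vector(2, 1), Vector(1, 1), Vector(1, 0)])
#   # -> [Vector(0, 0), Vector(0, 1), Vector(1, 1)]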
class ShapeConstrainer:
"""Creates constraints for placing fixed shape regions into the grid.
# Arguments
lattice (Lattice): The structure of the grid.
shapes (List[List[Vector]]): A list of region shape definitions.
Each region shape definition should be a list of offsets.
The same region shape definition may be included multiple times to
indicate the number of times that shape may appear (if allow_copies
is false).
solver (z3.Solver, None): A #Solver object. If None, a #Solver will be
constructed.
complete (bool): If true, every cell must be part of a shape region. Defaults
to false.
allow_rotations (bool): If true, allow rotations of the shapes to be placed
in the grid. Defaults to false.
allow_reflections (bool): If true, allow reflections of the shapes to be
placed in the grid. Defaults to false.
allow_copies (bool): If true, allow any number of copies of the shapes to be
placed in the grid. Defaults to false.
"""
_instance_index = 0
def __init__( # pylint: disable=R0913
self,
lattice: Lattice,
shapes: List[List[Vector]],
solver: Solver = None,
complete: bool = False,
allow_rotations: bool = False,
allow_reflections: bool = False,
allow_copies: bool = False
):
ShapeConstrainer._instance_index += 1
if solver:
self.__solver = solver
else:
self.__solver = Solver()
self.__lattice = lattice
self.__complete = complete
self.__allow_copies = allow_copies
self.__shapes = shapes
self.__make_variants(allow_rotations, allow_reflections)
self.__create_grids()
self.__add_constraints()
def __make_variants(self, allow_rotations, allow_reflections):
fs = self.__lattice.transformation_functions(
allow_rotations, allow_reflections)
self.__variants = [
[
list(shape_tuple)
for shape_tuple in {
tuple(canonicalize_shape([f(v) for v in s]))
for f in fs
}
]
for s in self.__shapes
]
def __create_grids(self):
"""Create the grids used to model shape region constraints."""
self.__shape_type_grid: Dict[Point, ArithRef] = {}
for p in self.__lattice.points:
v = Int(f"scst-{ShapeConstrainer._instance_index}-{p.y}-{p.x}")
if self.__complete:
self.__solver.add(v >= 0)
else:
self.__solver.add(v >= -1)
self.__solver.add(v < len(self.__shapes))
self.__shape_type_grid[p] = v
self.__shape_instance_grid: Dict[Point, ArithRef] = {}
for p in self.__lattice.points:
v = Int(f"scsi-{ShapeConstrainer._instance_index}-{p.y}-{p.x}")
if self.__complete:
self.__solver.add(v >= 0)
else:
self.__solver.add(v >= -1)
self.__solver.add(v < len(self.__lattice.points))
self.__shape_instance_grid[p] = v
def __add_constraints(self):
self.__add_grid_agreement_constraints()
self.__add_shape_instance_constraints()
if not self.__allow_copies:
for shape_index, shape in enumerate(self.__shapes):
self.__add_single_copy_constraints(shape_index, shape)
def __add_grid_agreement_constraints(self):
for p in self.__shape_type_grid:
self.__solver.add(
Or(
fast_and(
self.__shape_type_grid[p] == -1,
self.__shape_instance_grid[p] == -1
),
fast_and(
self.__shape_type_grid[p] != -1,
self.__shape_instance_grid[p] != -1
)
)
)
def __add_shape_instance_constraints(self): # pylint: disable=R0914
int_vals = {}
for i in range(max(len(self.__lattice.points), len(self.__variants))):
int_vals[i] = IntVal(i)
quadtree = ExpressionQuadTree(self.__lattice.points)
for instance_id in [self.__lattice.point_to_index(p) for p in self.__lattice.points]:
quadtree.add_expr(
(HAS_INSTANCE_ID, instance_id),
lambda p, i=instance_id: fast_eq(self.__shape_instance_grid[p], int_vals[i]))
quadtree.add_expr(
(NOT_HAS_INSTANCE_ID, instance_id),
lambda p, i=instance_id: fast_ne(self.__shape_instance_grid[p], int_vals[i]))
for shape_index in range(len(self.__variants)):
quadtree.add_expr(
(HAS_SHAPE_TYPE, shape_index),
lambda p, i=shape_index: fast_eq(self.__shape_type_grid[p], int_vals[i]))
root_options = defaultdict(list)
for shape_index, variants in enumerate(self.__variants): # pylint: disable=R1702
for variant in variants:
for root_point in self.__lattice.points:
instance_id = self.__lattice.point_to_index(root_point)
offset_points = set()
for offset_vector in variant:
point = root_point.translate(offset_vector)
if point not in self.__shape_instance_grid:
offset_points = None
break
offset_points.add(point)
if offset_points:
and_terms = []
for p in offset_points:
and_terms.append(quadtree.get_point_expr((HAS_INSTANCE_ID, instance_id), p))
and_terms.append(quadtree.get_point_expr((HAS_SHAPE_TYPE, shape_index), p))
and_terms.append(quadtree.get_other_points_expr(
(NOT_HAS_INSTANCE_ID, instance_id), offset_points))
root_options[root_point].append(fast_and(*and_terms))
for p in self.__lattice.points:
instance_id = self.__lattice.point_to_index(p)
not_has_instance_id_expr = quadtree.get_other_points_expr(
(NOT_HAS_INSTANCE_ID, instance_id), [])
or_terms = root_options[p]
if or_terms:
or_terms.append(not_has_instance_id_expr)
self.__solver.add(Or(*or_terms))
else:
self.__solver.add(not_has_instance_id_expr)
def __add_single_copy_constraints(self, shape_index, shape):
sum_terms = []
for p in self.__shape_type_grid:
sum_terms.append((self.__shape_type_grid[p] == shape_index, 1))
self.__solver.add(PbEq(sum_terms, len(shape)))
@property
def solver(self) -> Solver:
"""(z3.Solver): The #Solver associated with this #ShapeConstrainer."""
return self.__solver
@property
def shape_type_grid(self) -> Dict[Point, ArithRef]:
"""(Dict[Point, ArithRef]): A dictionary of z3 constants of shape types.
Each cell contains the index of the shape type placed in that cell (as
indexed by the shapes list passed in to the #ShapeConstrainer constructor),
or -1 if no shape is placed within that cell.
"""
return self.__shape_type_grid
@property
def shape_instance_grid(self) -> Dict[Point, ArithRef]:
"""(Dict[Point, ArithRef]): z3 constants of shape instance IDs.
Each cell contains a number shared among all cells containing the same
instance of the shape, or -1 if no shape is placed within that cell.
"""
return self.__shape_instance_grid
def print_shape_types(self):
"""Prints the shape type assigned to each cell.
Should be called only after the solver has been checked.
"""
model = self.__solver.model()
min_y = min(p.y for p in self.__shape_type_grid)
min_x = min(p.x for p in self.__shape_type_grid)
max_y = max(p.y for p in self.__shape_type_grid)
max_x = max(p.x for p in self.__shape_type_grid)
for y in range(min_y, max_y + 1):
for x in range(min_x, max_x + 1):
p = Point(y, x)
shape_index = -1
if p in self.__shape_type_grid:
v = self.__shape_type_grid[p]
shape_index = model.eval(v).as_long()
if shape_index >= 0:
sys.stdout.write(f"{shape_index:3}")
else:
sys.stdout.write(" ")
print()
def print_shape_instances(self):
"""Prints the shape instance ID assigned to each cell.
Should be called only after the solver has been checked.
"""
model = self.__solver.model()
min_y = min(p.y for p in self.__shape_instance_grid)
min_x = min(p.x for p in self.__shape_instance_grid)
max_y = max(p.y for p in self.__shape_instance_grid)
max_x = max(p.x for p in self.__shape_instance_grid)
for y in range(min_y, max_y + 1):
for x in range(min_x, max_x + 1):
p = Point(y, x)
shape_instance = -1
if p in self.__shape_instance_grid:
v = self.__shape_instance_grid[p]
shape_instance = model.eval(v).as_long()
if shape_instance >= 0:
sys.stdout.write(f"{shape_instance:3}")
else:
sys.stdout.write(" ")
print()
|
def Print():
    print('you may want to install beautifulsoup4, not beautfulsoup4')
|
from os.path import abspath, dirname, join
from os import environ, path
_cwd = dirname(abspath(__file__))
basedir = path.abspath(path.dirname(__file__))
class BaseConfiguration(object):
DEBUG = True
SECRET_KEY = 'Test'
CORS = ["http://localhost:4200", "http://127.0.0.1:5000"]
|
# File to hold all objects to ease construction of JSON payload.
# Non-PEP8 property declaration used as JSON serializing is 1:1, e.g. "clientId = clientId", not "client_id = clientId"
class Client(object):
clientId = ""
clientVersion = "0.0.1"
def __init__(self, client_id, client_version="0.0.1"):
self.clientId = client_id
self.clientVersion = client_version
class ThreatEntry(object):
def __init__(self, url):
self.url = url
class ThreatInfo(object):
def __init__(self, threatTypes, platformTypes, threatEntryTypes, threatEntries):
self.threatTypes = threatTypes
self.platformTypes = platformTypes
self.threatEntryTypes = threatEntryTypes
self.threatEntries = threatEntries
class Request(object):
def __init__(self, client, threatInfo):
self.client = client
self.threatInfo = threatInfo
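# A minimal, hypothetical usage sketch (not part of the original module):
# builds a request payload and serializes it 1:1 to JSON via __dict__, which
# is why the properties above mirror the JSON field names. The client id,
# threat lists, and URL below are illustrative assumptions.
def _example_payload():
    import json
    client = Client(client_id="example-client")
    threat_info = ThreatInfo(
        threatTypes=["MALWARE"],
        platformTypes=["ANY_PLATFORM"],
        threatEntryTypes=["URL"],
        threatEntries=[ThreatEntry("http://example.com/")],
    )
    request = Request(client, threat_info)
    # default=lambda o: o.__dict__ serializes nested objects field-for-field.
    return json.dumps(request, default=lambda o: o.__dict__)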
|
from .utils.defaults import default_depot_path, default_install_dir, default_symlink_dir
from .utils.filters import f_major_version, f_minor_version
from .utils import query_yes_no
from .utils import current_architecture, current_system, current_libc
from .utils import latest_version
from .utils import DmgMounter, TarMounter
from .utils import Version
from .utils import verify_upstream
from .utils import color, show_verbose
from .download import download_package
import os
import re
import shutil
import subprocess
def is_installed(version, check_symlinks=True):
"""
check if the required version is already installed.
"""
check_list = ["julia"]
if version == "latest":
check_list.append("julia-latest")
if version != "latest" and check_symlinks:
check_list.extend([f"julia-{f_major_version(version)}",
f"julia-{f_minor_version(version)}"])
for path in check_list:
if Version(get_exec_version(shutil.which(path))) != Version(version):
return False
return True
def get_exec_version(path):
ver_cmd = [path, "--version"]
try:
# outputs: "julia version 1.4.0-rc1"
version = subprocess.check_output(ver_cmd).decode("utf-8")
version = version.lower().split("version")[-1].strip()
except: # nopep8
# in case it fails in any situation: invalid target or command(.cmd)
# issue: https://github.com/abelsiqueira/jill/issues/25
version = "0.0.1"
return version
def check_installer(installer_path, ext):
filename = os.path.basename(installer_path)
if not filename.endswith(ext):
msg = f"The installer {filename} should be {ext} file"
raise ValueError(msg)
def last_julia_version(version=None):
# version should follow semantic version syntax
def sort_key(ver):
return float(ver.lstrip("v"))
version = float(f_minor_version(version)) if version else 999.999
proj_versions = os.listdir(os.path.join(default_depot_path(),
"environments"))
proj_versions = [x for x in proj_versions if re.fullmatch(r"v\d+\.\d+", x)]
proj_versions = sorted(filter(lambda ver: sort_key(ver) < version,
proj_versions),
key=sort_key)
if proj_versions:
return proj_versions[-1]
else:
return None
def make_symlinks(src_bin, symlink_dir, version):
if not os.path.isfile(src_bin):
raise(ValueError(f"{src_bin} doesn't exist."))
system = current_system()
if symlink_dir not in map(os.path.normpath, os.environ["PATH"].split(os.pathsep)):
print(f"add {symlink_dir} to PATH")
if system == "winnt":
            # FIXME: this also copies system PATH to user PATH
subprocess.run(["powershell.exe",
"setx", "PATH", f'"$env:PATH;{symlink_dir}"'])
else:
msg = "~/.bashrc will be modified"
msg += "\nif you're not using BASH, then you'll need manually"
msg += f" add {symlink_dir} to your PATH"
print(msg)
rc_file = os.path.expanduser("~/.bashrc")
with open(rc_file, "a") as file:
file.writelines("\n# added by jill\n")
file.writelines(f"export PATH={symlink_dir}:$PATH\n")
print(f"you need to restart your current shell to update PATH")
os.makedirs(symlink_dir, exist_ok=True)
new_ver = Version(get_exec_version(src_bin))
if version == "latest":
# issue 11: don't symlink to julia
link_list = ["julia-latest"]
elif len(Version(version).build) > 0:
link_list = ["julia-dev"]
elif len(new_ver.prerelease) > 0:
# issue #76
# - it is usually unwanted to symlink unstable release to `julia` and `julia-x`
# - still symlink to `julia-x.y` because otherwise there is no way to access the unstable
# release.
link_list = [f"julia-{f_minor_version(version)}"]
else:
link_list = [f"julia-{f(version)}" for f in (f_major_version,
f_minor_version)]
link_list.append("julia")
for linkname in link_list:
linkpath = os.path.join(symlink_dir, linkname)
if current_system() == "winnt":
linkpath += ".cmd"
# symlink rules:
# 1. always symlink latest
# 2. only make new symlink if it's a newer version
# - julia --> latest stable X.Y.Z
# - julia-1 --> latest stable 1.Y.Z
# - julia-1.0 --> latest stable 1.0.Z
# - don't make symlink to patch level
if os.path.exists(linkpath) or os.path.islink(linkpath):
if (os.path.islink(linkpath) and
os.readlink(linkpath) == src_bin):
# happens when installing a new patch version
continue
old_ver = Version(get_exec_version(linkpath))
if show_verbose():
print(f"old symlink version: {old_ver}")
print(f"new installation version: {new_ver}")
if old_ver > new_ver:
# if two versions are the same, use the new one
continue
msg = f"{color.YELLOW}remove old symlink"
msg += f" {linkname}{color.END}"
print(msg)
os.remove(linkpath)
print(f"{color.GREEN}make new symlink {linkpath}{color.END}")
if current_system() == "winnt":
with open(linkpath, 'w') as f:
# create a cmd file to mimic how we do symlinks in linux
f.writelines(['@echo off\n', f'"{src_bin}" %*'])
else:
os.symlink(src_bin, linkpath)
def copy_root_project(version):
mver = f_minor_version(version)
old_ver = last_julia_version(version)
if old_ver is None:
print(
f"Can't find available old root project for version {version}")
return None
env_path = os.path.join(default_depot_path(), "environments")
src_path = os.path.join(env_path, old_ver)
dest_path = os.path.join(env_path, f"v{mver}")
if src_path == dest_path:
return None
if os.path.exists(dest_path):
bak_path = os.path.join(env_path, f"v{mver}.bak")
if os.path.exists(bak_path):
print(f"{color.YELLOW}delete old backup {bak_path}{color.END}")
shutil.rmtree(bak_path)
shutil.move(dest_path, bak_path)
print(f"{color.YELLOW}move {dest_path} to {bak_path}{color.END}")
shutil.copytree(src_path, dest_path)
def install_julia_tarball(package_path,
install_dir,
symlink_dir,
version,
upgrade):
check_installer(package_path, ".tar.gz")
if re.match("(.*)\+(\w+)$", version):
# We want a different folder name for commit builds so that we can have
# julia-dev and julia-latest points to two different julia versions
suffix = 'dev'
else:
suffix = f_minor_version(version)
with TarMounter(package_path) as root:
src_path = root
dest_path = os.path.join(install_dir, f"julia-{suffix}")
if os.path.exists(dest_path):
shutil.rmtree(dest_path)
msg = f"{color.YELLOW}remove previous Julia installation:"
msg += f" {dest_path}{color.END}"
print(msg)
# preserve lib symlinks, otherwise it might cause troubles
# see also: https://github.com/JuliaGPU/CUDA.jl/issues/249
shutil.copytree(src_path, dest_path, symlinks=True)
print(f"{color.GREEN}install Julia to {dest_path}{color.END}")
os.chmod(dest_path, 0o755) # issue 12
bin_path = os.path.join(dest_path, "bin", "julia")
if current_system() == 'winnt':
bin_path += '.exe'
make_symlinks(bin_path, symlink_dir, version)
if upgrade:
copy_root_project(version)
return True
def install_julia_dmg(package_path,
install_dir,
symlink_dir,
version,
upgrade):
check_installer(package_path, ".dmg")
with DmgMounter(package_path) as root:
# mounted image contents:
# ['.VolumeIcon.icns', 'Applications', 'Julia-1.3.app']
appname = next(filter(lambda x: x.lower().startswith('julia'),
os.listdir(root)))
src_path = os.path.join(root, appname)
dest_path = os.path.join(install_dir, appname)
if os.path.exists(dest_path):
msg = f"{color.YELLOW}remove previous Julia installation:"
msg += f" {dest_path}{color.END}"
print(msg)
shutil.rmtree(dest_path)
# preserve lib symlinks, otherwise it might cause troubles
# see also: https://github.com/JuliaGPU/CUDA.jl/issues/249
shutil.copytree(src_path, dest_path, symlinks=True)
print(f"{color.GREEN}install Julia to {dest_path}{color.END}")
bin_path = os.path.join(dest_path,
"Contents", "Resources", "julia", "bin", "julia")
make_symlinks(bin_path, symlink_dir, version)
if upgrade:
copy_root_project(version)
return True
def install_julia_exe(package_path,
install_dir,
symlink_dir,
version,
upgrade):
check_installer(package_path, ".exe")
dest_path = os.path.join(install_dir,
f"julia-{f_minor_version(version)}")
if os.path.exists(dest_path):
shutil.rmtree(dest_path, ignore_errors=True)
msg = f"{color.YELLOW}remove previous Julia installation:"
msg += f" {dest_path}{color.END}"
print(msg)
# build system changes for windows after 1.4
# https://github.com/JuliaLang/julia/blob/release-1.4/NEWS.md#build-system-changes
if Version(version).next_patch() < Version("1.4.0"):
# it's always false if version == "latest"
subprocess.check_output([f'{package_path}',
'/S', f'/D={dest_path}'])
else:
subprocess.check_output([f'{package_path}',
'/VERYSILENT',
f'/DIR={dest_path}'])
print(f"{color.GREEN}install Julia to {dest_path}{color.END}")
bin_path = os.path.join(dest_path, "bin", "julia.exe")
make_symlinks(bin_path, symlink_dir, version)
if upgrade:
copy_root_project(version)
return True
def hello_msg():
msg = f"{color.BOLD}JILL - Julia Installer 4 Linux"
msg += f" (MacOS, Windows and FreeBSD) -- Light{color.END}\n"
print(msg)
def install_julia(version=None, *,
install_dir=None,
symlink_dir=None,
upgrade=False,
upstream=None,
unstable=False,
keep_downloads=False,
confirm=False,
reinstall=False):
"""
Install the Julia programming language for your current system
`jill install [version]` would satisfy most of your use cases, try it first
and then read description of other arguments. `version` is optional, valid
version syntax for it is:
* `stable`: latest stable Julia release. This is the _default_ option.
* `1`: latest `1.y.z` Julia release.
* `1.0`: latest `1.0.z` Julia release.
* `1.4.0-rc1`: as it is.
* `latest`/`nightly`: the nightly builds from source code.
For Linux/FreeBSD systems, if you run this command with `root` account,
then it will install Julia system-widely.
To download from a private mirror, please check `jill download -h`.
Arguments:
version:
The Julia version you want to install.
upstream:
manually choose a download upstream. For example, set it to "Official"
if you want to download from JuliaComputing's s3 buckets.
upgrade:
add `--upgrade` flag also copy the root environment from an older
Julia version.
unstable:
add `--unstable` flag to allow installation of unstable releases for auto version
query. For example, `jill install --unstable` might give you unstable installation
like `1.7.0-beta1`. Note that if you explicitly pass the unstable version, e.g.,
`jill install 1.7.0-beta1`, it will still work.
keep_downloads:
add `--keep_downloads` flag to not remove downloaded releases.
confirm: add `--confirm` flag to skip interactive prompt.
reinstall:
jill will skip the installation if the required Julia version already exists,
add `--reinstall` flag to force the reinstallation.
install_dir:
where you want julia packages installed.
symlink_dir:
where you want symlinks(e.g., `julia`, `julia-1`) placed.
"""
install_dir = install_dir if install_dir else default_install_dir()
install_dir = os.path.abspath(install_dir)
symlink_dir = symlink_dir if symlink_dir else default_symlink_dir()
symlink_dir = os.path.normpath(os.path.abspath(symlink_dir))
system, arch = current_system(), current_architecture()
version = str(version) if (version or str(version) == "0") else ''
version = "latest" if version == "nightly" else version
version = "" if version == "stable" else version
upstream = upstream if upstream else os.environ.get("JILL_UPSTREAM", None)
if system == "linux" and current_libc() == "musl":
# currently Julia tags musl as a system, e.g.,
# https://julialang-s3.julialang.org/bin/musl/x64/1.5/julia-1.5.1-musl-x86_64.tar.gz
system = "musl"
hello_msg()
if system == "winnt":
install_dir = install_dir.replace("\\\\", "\\").strip('\'"')
if not confirm:
version_str = version if version else "latest stable release"
question = "jill will:\n"
question += f" 1) install Julia {version_str} for {system}-{arch}"
question += f" into {color.UNDERLINE}{install_dir}{color.END}\n"
question += f" 2) make symlinks in {color.UNDERLINE}{symlink_dir}{color.END}\n"
question += f"You may need to manually add {color.UNDERLINE}{symlink_dir}{color.END} to PATH\n"
question += "Continue installation?"
to_continue = query_yes_no(question)
if not to_continue:
return False
if upstream:
verify_upstream(upstream)
wrong_args = False
try:
version = latest_version(
version, system, arch, upstream=upstream, stable_only=not unstable)
except ValueError:
# hide the nested error stack :P
wrong_args = True
if wrong_args:
msg = f"wrong version(>= 0.6.0) argument: {version}\n"
msg += f"Example: `jill install 1`"
raise(ValueError(msg))
if not reinstall and is_installed(version):
print(f"julia {version} already installed.")
return True
overwrite = True if version == "latest" else False
print(f"{color.BOLD}----- Download Julia -----{color.END}")
package_path = download_package(version, system, arch,
upstream=upstream,
overwrite=overwrite)
if not package_path:
return False
if package_path.endswith(".dmg"):
installer = install_julia_dmg
elif package_path.endswith(".tar.gz"):
installer = install_julia_tarball
elif package_path.endswith(".exe"):
installer = install_julia_exe
    else:
        print(f"{color.RED}Unsupported file format for {package_path}{color.END}.")
        return False
print(f"{color.BOLD}----- Install Julia -----{color.END}")
installer(package_path, install_dir, symlink_dir, version, upgrade)
if not keep_downloads:
print(f"{color.BOLD}----- Post Installation -----{color.END}")
print("remove downloaded files...")
print(f"remove {package_path}")
os.remove(package_path)
gpg_signature_file = package_path + ".asc"
if os.path.exists(gpg_signature_file):
print(f"remove {gpg_signature_file}")
os.remove(gpg_signature_file)
print(f"{color.GREEN}Done!{color.END}")
|
# Automatically generated
# pylint: disable=all
get = [{'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'ValidCores': [1], 'ValidThreadsPerCore': [1], 'SizeInMiB': 2048, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 2, 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'a1.medium', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'ValidCores': [1], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 2048}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 2, 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'ValidCores': [2], 'ValidThreadsPerCore': [1], 'SizeInMiB': 4096, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'a1.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'ValidCores': [2], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 4096}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 4, 'DefaultCores': 4, 'DefaultThreadsPerCore': 1, 'ValidCores': [4], 'ValidThreadsPerCore': [1], 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 
'spread'], 'InstanceType': 'a1.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 4, 'DefaultThreadsPerCore': 1, 'ValidCores': [4], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 8, 'DefaultCores': 8, 'DefaultThreadsPerCore': 1, 'ValidCores': [8], 'ValidThreadsPerCore': [1], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'a1.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 8, 'DefaultThreadsPerCore': 1, 'ValidCores': [8], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 16, 'DefaultCores': 16, 'DefaultThreadsPerCore': 1, 'ValidCores': [16], 'ValidThreadsPerCore': [1], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'a1.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 16, 'DefaultThreadsPerCore': 1, 'ValidCores': [16], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': 
False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}, {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 16, 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'a1.metal', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'BareMetal': True, 'ProcessorInfo': {'SupportedArchitectures': ['arm64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 16}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required'}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True}] # noqa: E501
def get_instances_list() -> list:
    '''Return the list of EC2 instance type descriptions (the a1 family).'''
# pylint: disable=all
return get
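# Example of consuming the data returned above (illustrative; the helper name
# and the vCPU threshold are arbitrary, not part of the generated module):
def example_filter_by_vcpus(min_vcpus=4):
    """Return the InstanceType names whose default vCPU count is at least min_vcpus."""
    return [item['InstanceType'] for item in get_instances_list()
            if item['VCpuInfo']['DefaultVCpus'] >= min_vcpus]
    # example_filter_by_vcpus(4) -> ['a1.xlarge', 'a1.2xlarge', 'a1.4xlarge', 'a1.metal']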
|
#
# Tic Tac Toe
#
import numpy as np
from gym import spaces
WinMasks = [
[
[1,0,0],
[1,0,0],
[1,0,0],
],
[
[0,1,0],
[0,1,0],
[0,1,0],
],
[
[0,0,1],
[0,0,1],
[0,0,1],
],
[
[1,1,1],
[0,0,0],
[0,0,0],
],
[
[0,0,0],
[1,1,1],
[0,0,0],
],
[
[0,0,0],
[0,0,0],
[1,1,1],
],
[
[1,0,0],
[0,1,0],
[0,0,1],
],
[
[0,0,1],
[0,1,0],
[1,0,0],
]
]
WinMasks = np.array(WinMasks).reshape((-1,9))
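# Quick sanity check of the flattened win masks (illustrative): a board with
# X (+1) across the top row matches the corresponding row mask, because the
# masked sum equals 3 (this is exactly the test used in step() below):
#   board = np.array([1, 1, 1, -1, -1, 0, 0, 0, 0], dtype=float)
#   assert any(np.sum(board * mask) == 3 for mask in WinMasks)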
class SingleAgentTicTacToeEnv(object):
NActions = 9
ObservationShape = (9,)
NState = 9
def __init__(self):
self.Board = np.zeros((9,))
self.action_space = spaces.Discrete(self.NActions)
high = np.ones((self.NActions,))
self.observation_space = spaces.Box(-high, high, dtype=np.float32)
def reset(self):
self.Done = False
self.Board[...] = 0.0
self.BoardHistory = []
self.Side = 1
self.FirstMove = True
return self.observation(self.Side), {"valid_actions":np.array([1,1,0,0,1,0,0,0,0], dtype=np.float32)}
def observation(self, side):
return self.Board * side
def step(self, action):
win = False
draw = False
side = self.Side
other_side = -side
color = side
reward = 0.0
done = False
if self.Board[action] != 0:
# invalid move
reward = -1.0
done = True
else:
self.Board[action] = side
self.BoardHistory.append(self.Board.reshape((3,3)).copy())
for win_mask in WinMasks:
masked = self.Board*color*win_mask
if np.sum(masked) == 3:
reward = 1.0
done = True
break
if np.all(self.Board != 0):
done = True # draw
self.Side = other_side
self.Done = done
self.Reward = reward
return self.observation(self.Side), reward, done, {"valid_actions":np.asarray(self.Board==0, dtype=np.float32)}
def render(self):
if self.Done:
last_move = -self.Side
history = self.BoardHistory
sep = "+---"*len(history) + "+"
lines = [sep]
for irow in (0,1,2):
line = "|"
for b in history:
row = "".join(".xo"[int(c)] for c in b[irow])
line += row + "|"
lines.append(line)
outcome = "draw"
if self.Reward:
outcome = "%s won" % (".xo"[int(last_move)])
lines.append(sep + " " + outcome)
print("\n".join(lines))
if __name__ == "__main__":
import random
def show_board(board):
sep = "+---"*3 + "+"
out = [sep]
for row in board.reshape((3,3)):
line = "| "
for x in row:
line += " OX"[int(x)] + " | "
out.append(line)
out.append(sep)
return "\n".join(out)
class Agent(object):
def __init__(self, side):
self.Side = side
self.Sign = "XO"[side]
self.Color = side*2-1
def reset(self):
pass
def action(self, reward, observation, available_actions):
print(f"{self.Sign}: action:", reward, observation, available_actions)
choices = [i for i, x in enumerate(available_actions) if x]
i = random.choice(choices)
return i
def reward(self, r):
#print(f"{self.Sign}: reward: {r}")
pass
def done(self, r, last_observation):
if r > 0:
print(f"===== {self.Sign} won")
elif r < 0:
print(f"===== {self.Sign} lost")
else:
print("===== draw")
    # Simple random self-play driver using the environment's reset()/step() API.
    # env.Side is +1 (rendered as "x") or -1 (rendered as "o"), so map the
    # sides onto the two demo agents accordingly.
    agents = {1: Agent(0), -1: Agent(1)}
    env = SingleAgentTicTacToeEnv()
    obs, info = env.reset()
    reward, done = 0.0, False
    while not done:
        agent = agents[env.Side]
        action = agent.action(reward, obs, info["valid_actions"])
        obs, reward, done, info = env.step(action)
        print(show_board(env.Board))
    print("--- game over ---")
    env.render()
|
###################################################################
""" Summary: Class and Methods for deriving MCSS based MMP's
About: Derive a matched pair based MCSS from a pair of molecules
To do: - extend the method enumerate_fragment_properties to also
enumerate self.mol_smi_dict as this would allow the addition
of a flag '-p' that prints out whole molecule props alongside
MCSS and therefore compute %molecule that the MCSS covers
- could use other descriptors from IW code to get MCSS via
bond count not #Atoms or
- Should move iterators in process_mcss_list_to_string to be numeric
and store numeric ID's in self.largest_mcs_mmp_double/single
       - could allow further switches to change behaviour of tie break
where single/double or double alone give tie break MCSS
[connected substructures versus disconnected or both/either]
- Extension to triple cut would allow improved search/match e.g.:
N1(C(c2c(cc3c(c2)OCO3)CC1)c4cc(c(c(c4)OC)O)OC)C(=O)OC CHEMBL311765
N1(C(c2c(cc(cc2)O)CC1)c3ccc(cc3)OCCN4CCCC4)C(=O)OCC CHEMBL94080
"""
###################################################################
import logging
import csv
import os
import sys
import unittest
import tempfile
from builtins import range
from mmp.mmp_data_objects import MMPDataObjectClass
if 'LILLYMOL_HOME' in os.environ:
import pybase.pyopmo as pymo
else:
import pybase.pymo as pymo
class MMPbasedMCSSObjectClass(MMPDataObjectClass):
def __init__(self, logger_object):
"""
Example usage:
mmplogger = logging.getLogger('lillymol_file_logger')
logging.disable(logging.CRITICAL)
my_mmp_mcss_object = MMPbasedMCSSObjectClass(mmplogger)
"""
MMPDataObjectClass.__init__(self, logger_object)
self.logger = logger_object
if len(logging.Logger.manager.loggerDict) < 1:
# exit with system status 1 and custom error
sys.exit("Invalid or no logger object passed to MMPObjectClass. Please create \
and pass a logger and set to use logging.disable if you don't want logging")
# this is used for storing the largest MCS MMP for given pair
self.largest_mcs_mmp_result = {}
self.ref_smi_props = {}
def clean_out_data_mcss_obj(self):
"""Method to clean out all objects in class"""
self.clean_out_data()
self.mcs_mmp.clear()
def enumerate_fragment_properties(self):
"""Writes out the ref_smi_dict to disk, calculates natoms, returns data to self.ref_smi_props
Some complexities in method such as double cut fragments (iw_descr only calcs largest frag)"""
frag_smi_file = tempfile.NamedTemporaryFile(delete=False, suffix='.smi')
frag_smi_props_out = tempfile.NamedTemporaryFile(delete=False)
with open(frag_smi_file.name, "w") as f:
for item in self.refsmi_dict:
if isinstance(item, int):
# can't see an easy way to do this except string compare, [1H] causes iw_descr to crash out
if self.refsmi_dict[item] != '[1H]':
f.write(self.refsmi_dict[item]+" "+str(item)+"\n")
# run pymo.iwdescr
self.logger.info("Running pymo.iwdescr on %s smi with in:%s, out:%s" %
(len(self.refsmi_dict), frag_smi_file.name, frag_smi_props_out.name))
exit_status = pymo.iwdescr(frag_smi_file.name, frag_smi_props_out.name, params_dict={'-l': '', '-v': ''},
loggero=self.logger)
self.logger.debug("Ran iwdescr with exit status %s" % exit_status)
with open(frag_smi_props_out.name, "r") as csv_file:
reader = csv.reader(csv_file, delimiter=' ')
i = -1
for row in reader:
i += 1
# if header row, append headers
if i == 0:
                    if row[1] != 'w_natoms':
                        self.logger.warning("When this was written, NATOMs was in array position 1 (zero indexed) "
                                            "with column title w_natoms. Now it's not, it's: %s" % row[1])
                        sys.exit("When this was written, NATOMs was in array position 1 (zero indexed) with column "
                                 "title w_natoms. Now it's not, it's: %s" % row[1])
continue
# we trust there is only one entry per id
# print row[0], row[1]
self.ref_smi_props[int(row[0])] = int(row[1])
frag_smi_props_out.close()
self.logger.debug("Completed load of %s mol props from dict of %s from file %s" %
(len(self.ref_smi_props), len(self.refsmi_dict)/2, frag_smi_props_out.name))
def get_largest_mcs_pairs(self, out_file, cut_type, mdc_atm_soft=None, mdc_atm_soft_threshold=None,
mdc_atm_hard=None):
"""Method to print out a single smi - smi pair from the input CSV with data differences. Selection of the
exact matched pair for a given smi - smi combination is based on the largest Maximum Common Substructure
which equates to the MMP with the smallest MWT/#Atoms difference across all MMP's for that smi/smi combo
out_file:
The user specified output file
cut_type:
Specifies the type of fragmentation required. Allowed values are SINGLE,
DOUBLE or BOTH. Currently this class does not support anything greater than
double cut fragmentation
mdc_atm_hard:
max double cut atom cutoff (hard)
Never consider double cut context fragments where one half has num_atoms <= mdc_atm_hard
i.e.: this is a hard cutoff filter implemented during dicer parsing
mdc_atm_soft:
max double cut atom cutoff (soft)
* must be used with mdc_atm_soft_threshold
When double cut is greater than single, if one part of double context has num_atoms <= mdc_atm_soft and
total double cut atom <= single cut atoms + mdc_atm_soft_threshold then discard
mdc_atm_soft_threshold:
max double cut atom cutoff threshold (soft)
* must be used with mdc_atm_soft
This gets added to single cut num atoms each comparison that's done, if and when mdc_atm_soft is set
see details of mdc_atm_soft
Example usage:
# give me a CSV named my_output.pairs of all MCS based pairs:
my_mmp_object.get_largest_mcs_pairs('myoutput.csv', 'BOTH', 'DICER')
# give me a CSV of only the DOUBLE cut MCS based pairs with RDKit attachment points:
my_mmp_object.get_largest_mcs_pairs('myoutput.csv', 'DOUBLE', 'RDKIT')
"""
if (mdc_atm_soft is not None and mdc_atm_soft_threshold is None) or\
(mdc_atm_soft is None and mdc_atm_soft_threshold is not None):
sys.exit("Error, mdc_atm_soft and mdc_atm_soft_threshold must be specified together.")
def process_mcss_list_to_string(prefix, input_list):
"""sub method to build a printable string from input list of specific structure"""
out_string = ''
num_of_entries = len(input_list)
if num_of_entries > 4:
for i_ in range(0, num_of_entries, 4):
                    out_string = out_string + prefix + "_" + str(i_ // 4 + 1) + "," + str(molid_L) + "," + str(molid_R)
out_string = out_string + "," + str(sum(input_list[0 + i_])) + "," + str(input_list[1 + i_]) + ","
out_string = out_string + str(input_list[2 + i_]) + "," + str(input_list[3 + i_])
out_string += "\n"
else:
if len(input_list[1]) > 1:
ctx_smi = self.refsmi_dict[input_list[1][0]] + "." + self.refsmi_dict[input_list[1][1]]
else:
ctx_smi = self.refsmi_dict[input_list[1][0]]
out_string = prefix + "," + str(molid_L) + "," + str(molid_R) + ","
out_string = out_string + str(sum(input_list[0])) + "," + ctx_smi + ","
out_string = out_string + str(self.refsmi_dict[input_list[2]]) + "," \
+ str(self.refsmi_dict[input_list[3]])
out_string += "\n"
return out_string
def disambiguate_double_list(input_list):
"""sub method to untangle double cut tie break cases"""
num_of_entries = len(input_list)
filtered_list = []
# The tie code should have only saved the example with the largest 'smallest fragment' size
# so now we just take the first example where atom numbering [1 before [2
# Theoretically, if two different examples of a double cut fragmentation pattern exist with the same number
# of atoms *in both parts* of the context, then there is another tie break here. e.g.:
# num_atoms in context = (2,10) should always appear not (1,11) but can't disentangle many (1,11)
# Decided not to handle this and instead just take the first one with the ordered numbering
for i_ in range(0, num_of_entries, 4):
# only use if the isomeric label is the right way round, [1 before [2
if '[1' in self.refsmi_dict[input_list[1 + i_][0]]:
filtered_list = input_list[(0 + i_): (4 + i_)]
else:
continue
return filtered_list
def remove_atom_num_dupes(input_list):
"""sub method to get only 1 example of simple isomeric numbering flip"""
# only use if the isomeric label is the right way round, [1 before [2
if '[1' in self.refsmi_dict[input_list[1][0]]:
# take the first 4 items
output_list = input_list[:4]
else:
# just take the last 4 items
output_list = input_list[-4:]
return output_list
self.logger.info('Opening output file for write: %s' % out_file)
# check cut_type, convert to int
if cut_type.upper() == 'DOUBLE':
# confusing but faster later
cut_type_id = 3
elif cut_type.upper() == 'BOTH':
# confusing but faster later
cut_type_id = 2
elif cut_type.upper() == 'SINGLE':
cut_type_id = 1
else:
            self.logger.warning('cut_type specification is incorrect, using single cut: %s' % cut_type.upper())
cut_type_id = 1
# fail if both single_pairs_dict and double_pairs_dict are empty
if (len(self.single_pairs_dict) == 0) and (len(self.double_pairs_dict) == 0):
self.logger.debug('No data found in single_pairs_dict and/or double_pairs_dict, expect no results')
# sys.exit("Error: no data found in single_pairs_dict and/or double_pairs_dict, nothing to find and write")
#
# Here we build data structures of type:
# self.largest_mcs_mmp_result[(molid_L, molid_R)] = [(#atoms, #atoms or None),
# (context_id, context_id or None), frag_Left_id, frag_Right_id]
#
# single - this is easy as we only keep/store the one with the greatest number of atoms
if cut_type_id <= 2:
for molid_L, molid_R, ctx_id, frag_L_id, frag_R_id in \
self.iterator_single_pairs_dict_numeric(inc_attachpt=False):
if (molid_L, molid_R) in self.largest_mcs_mmp_result:
if self.largest_mcs_mmp_result[(molid_L, molid_R)][0][0] <= self.ref_smi_props[ctx_id]:
if self.largest_mcs_mmp_result[(molid_L, molid_R)][0][0] == self.ref_smi_props[ctx_id]:
self.largest_mcs_mmp_result[(molid_L, molid_R)].extend(
[(self.ref_smi_props[ctx_id], ), (ctx_id, ), frag_L_id, frag_R_id])
else:
self.largest_mcs_mmp_result[(molid_L, molid_R)] = [
(self.ref_smi_props[ctx_id], ), (ctx_id, ), frag_L_id, frag_R_id]
else:
self.largest_mcs_mmp_result[(molid_L, molid_R)] = [
(self.ref_smi_props[ctx_id], ), (ctx_id, ), frag_L_id, frag_R_id]
# now build the final results on the fly
# double - for each one we compare against what we already have in self.largest_mcs_mmp_result
ctx_natoms = None
if cut_type_id >= 2:
for molid_L, molid_R, ctx1_id, ctx2_id, frag_L_id, frag_R_id in \
self.iterator_double_pairs_dict_numeric(inc_attachpt=False):
#
if ctx1_id in self.ref_smi_props:
ctx_natoms = (self.ref_smi_props[ctx1_id], )
else:
ctx1_smi = self.refsmi_dict[ctx1_id]
ctx1_smi = ctx1_smi.replace("[1", "[9")
ctx1_smi = ctx1_smi.replace("[2", "[1")
ctx1_smi = ctx1_smi.replace("[9", "[2")
try:
ctx_natoms = (self.ref_smi_props[self.refsmi_dict[ctx1_smi]], )
                except KeyError:
print("ERR >>>")
print(("{} {} {} {} {} {}".format(molid_L, molid_R, ctx1_id, ctx2_id, frag_L_id, frag_R_id)))
print(("{} {} {}".format(ctx1_id, ctx1_smi, self.refsmi_dict[ctx1_smi])))
print("")
if ctx2_id in self.ref_smi_props:
ctx_natoms = ctx_natoms + (self.ref_smi_props[ctx2_id], )
else:
ctx2_smi = self.refsmi_dict[ctx2_id]
ctx2_smi = ctx2_smi.replace("[1", "[9")
ctx2_smi = ctx2_smi.replace("[2", "[1")
ctx2_smi = ctx2_smi.replace("[9", "[2")
ctx_natoms = ctx_natoms + (self.ref_smi_props[self.refsmi_dict[ctx2_smi]], )
                # If mdc_atm_hard (maximum double cut atoms, hard limit) is set, pre-filter all ctx fragments
                # and keep only those whose parts are strictly greater than the limit.
                # This is a crude filter and could remove valid double cut MCSS.
if mdc_atm_hard is not None:
if ctx_natoms[0] <= mdc_atm_hard:
continue
elif ctx_natoms[1] <= mdc_atm_hard:
continue
#
# Main
# have we seen this smi - smi pair before?
if (molid_L, molid_R) in self.largest_mcs_mmp_result:
# get the number of atoms in the context
num_atoms_existing = self.largest_mcs_mmp_result[(molid_L, molid_R)][0]
if len(num_atoms_existing) > 1:
total_num_atoms_existing = sum(num_atoms_existing)
else:
total_num_atoms_existing = num_atoms_existing[0]
total_num_atoms_new = sum(ctx_natoms)
if total_num_atoms_new > total_num_atoms_existing:
# if it is a double and we have a min fragment setting
if mdc_atm_soft is not None:
# if it falls below the threshold at which we apply this min frag setting
if total_num_atoms_new <= (total_num_atoms_existing + mdc_atm_soft_threshold):
# only keep if both frag sizes are legal
if '[1' in self.refsmi_dict[ctx1_id]:
if (ctx_natoms[0] > mdc_atm_soft) and (ctx_natoms[1] > mdc_atm_soft):
self.largest_mcs_mmp_result[(molid_L, molid_R)] = \
[ctx_natoms, (ctx1_id, ctx2_id), frag_L_id, frag_R_id]
# above threshold so keep anyway
else:
if '[1' in self.refsmi_dict[ctx1_id]:
self.largest_mcs_mmp_result[(molid_L, molid_R)] = \
[ctx_natoms, (ctx1_id, ctx2_id), frag_L_id, frag_R_id]
else:
if '[1' in self.refsmi_dict[ctx1_id]:
self.largest_mcs_mmp_result[(molid_L, molid_R)] = \
[ctx_natoms, (ctx1_id, ctx2_id), frag_L_id, frag_R_id]
# tie-break
elif total_num_atoms_new == total_num_atoms_existing:
# single always wins over double, so only consider this if existing is double
# double cut tie breaks get disambiguated later using custom function
if len(num_atoms_existing) == 1:
continue
else:
# consider the size of the 'smallest fragment' and add if same, replace if bigger,
# drop if smaller
if min(ctx_natoms) > min(num_atoms_existing):
if '[1' in self.refsmi_dict[ctx1_id]:
self.largest_mcs_mmp_result[(molid_L, molid_R)] = \
[ctx_natoms, (ctx1_id, ctx2_id), frag_L_id, frag_R_id]
elif min(ctx_natoms) == min(num_atoms_existing):
self.largest_mcs_mmp_result[(molid_L, molid_R)].extend(
[ctx_natoms, (ctx1_id, ctx2_id), frag_L_id, frag_R_id])
else:
# don't store as we have a better context with a larger 'smallest fragment'
continue
# double cut context must be smaller than what we already have so discard this new one
else:
continue
else:
# new result, case where we only have a double cut MCSS so add it!
if '[1' in self.refsmi_dict[ctx1_id]:
self.largest_mcs_mmp_result[(molid_L, molid_R)] = [ctx_natoms, (ctx1_id, ctx2_id),
frag_L_id, frag_R_id]
with open(out_file, "w") as final_out:
final_out.write('CUT_TYPE,MOL_ID_L,MOL_ID_R,NATOMS,MCSS,FRAG_L,FRAG_R\n')
# do single cut first as these take precedence above a double
for (molid_L, molid_R) in self.largest_mcs_mmp_result:
list_length = len(self.largest_mcs_mmp_result[(molid_L, molid_R)])
# the list self.largest_mcs_mmp_result[(molid_L, molid_R)] contains an ordered list of items
# the first 4 are (1) a tuple of the num_atoms (2) fragment (3&4) context in two parts
# Therefore if the list is greater than 8 items it means we have more than one double
# cut that we need to consider, possibly as a double cut tie break. We do not consider the
# case where there are 8 items as we know this will be two identical fragmentation patterns
# with differing isomeric numbering on the atom attachment points therefore we use >8 not >=8
if list_length > 8:
if len(self.largest_mcs_mmp_result[(molid_L, molid_R)][0]) == 1:
# disambiguate single cut list
final_out.write(process_mcss_list_to_string('SINGLE', self.largest_mcs_mmp_result[
(molid_L, molid_R)][0:4]))
else:
# print("Double won (a): ", molid_L, molid_R, self.largest_mcs_mmp_result[(molid_L, molid_R)])
new_list = disambiguate_double_list(self.largest_mcs_mmp_result[(molid_L, molid_R)])
final_out.write(process_mcss_list_to_string('DOUBLE', new_list))
elif list_length == 4:
# print("Single won (a): ", molid_L, molid_R, self.largest_mcs_mmp_result[(molid_L, molid_R)])
final_out.write(process_mcss_list_to_string('SINGLE', self.largest_mcs_mmp_result[
(molid_L, molid_R)]))
else:
# print("Double wins (b): ", molid_L, molid_R, self.largest_mcs_mmp_result[(molid_L, molid_R)])
# need to remove atom numbering dupes then print
new_list = remove_atom_num_dupes(self.largest_mcs_mmp_result[(molid_L, molid_R)])
final_out.write(process_mcss_list_to_string('DOUBLE', new_list))
class _TestMMPbasedMCSSObjectClass(unittest.TestCase):
"""Test class for MMPDataObjectClass(object) written to use pythons unittest
Example usage:
python mmp_mcss_objects.py
coverage run mmp_mcss_objects.py
coverage report mmp_mcss_objects.py
"""
def setUp(self):
"""Instantiate temp file names, test data objects that get written to temp files
a silent logger object (needed to instantiate class) and the mmp object we'll test"""
self.maxDiff = None
# setup test data location use tempfile.NamedTemporaryFile(delete=False) to persist data on disk
self.temp_file_input_smi_01 = tempfile.NamedTemporaryFile(delete=False, suffix=".smi",
encoding='utf-8', mode='wt')
self.temp_file_input_smi_03 = tempfile.NamedTemporaryFile(delete=False, suffix=".smi",
encoding='utf-8', mode='wt')
self.temp_file_output_pairs = tempfile.NamedTemporaryFile(delete=False)
# setup a logger object
self.mmplogger = logging.getLogger('mmpobjectclass_testlogger')
# logging.disable(logging.CRITICAL)
# create empty mmp object
self.test_mmp_mcss_object = MMPbasedMCSSObjectClass(self.mmplogger)
# data set for use in testing input
self.test_dataset_goldeninput_smi_01 = {
# The following represent synthetic data, analogues of CHEMBL1382609
# https://www.ebi.ac.uk/chembl/compound_report_card/CHEMBL1382609/
# 1. substituents are added to the pyrazole ring to generate side chain MMPs
# H on CHEMBL1382609 between two methyls is changed to Br, F, C, I to
# visually see the change in the smiles string (avoiding Cl as already present)
# e.g.: N1C(=C(Br)C(=N1)C)C
# 2. core ring system is modified (phenyl to pyridine) to see ring switch MMP's
# Presence/Absence of Pyridine-N and N-positional isomerism in Cl-Ph ring
# e.g.: C2=NC(=CS2)C2=CC=C(Cl)C=C2 + addition of N ->
# C2=NC(=CS2)C2=CN=C(Cl)C=C2 + move N around ring ->
# C2=NC(=CS2)C2=NC=C(Cl)C=C2
# for 1,2 single wins
'001': 'N1(C2=NC(=CS2)C2=CC=C(Cl)C=C2)C(=C(Br)C(=N1)C)C',
'002': 'N1(C2=NC(=CS2)C2=CC=C(Cl)C=C2)C(=C(F)C(=N1)C)C',
# for 2,5 double wins tie
'003': 'N1(C2=NC(=CS2)C2=CN=C(Cl)C=C2)C(=C(F)C(=N1)C)C',
# The following represent synthetic data, analogues of CHEMBL1341352
# for 1341352 and it's synthetic unsubstituted analogue there is no double
# https://www.ebi.ac.uk/chembl/compound_report_card/CHEMBL1341352/
'1341352': 'Cc1cc(nn1CC(=O)NCc2ccccc2)C(F)(F)F',
'004': 'c1cc(nn1CC(=O)NCc2ccccc2)',
# more double cut only
# https://www.ebi.ac.uk/chembl/compound_report_card/CHEMBL6211
# https://www.ebi.ac.uk/chembl/compound_report_card/CHEMBL6232
'6211': 'O=C(OCC1N(C(=O)c2cc(c(OC)c(c2)OC)OC)CCN(C1)C(=O)c1cc(c(OC)c(OC)c1)OC)CCCCCCC',
'6232': 'O=C(N1C(CN(C(=O)c2cc(c(OC)c(c2)OC)OC)CC1)COC(=O)CC(C)(C)C)c1cc(c(OC)c(OC)c1)OC'
}
self.test_dataset_goldeninput_smi_03 = {
# repeat of above
'001': 'N1(C2=NC(=CS2)C2=CC=C(Cl)C=C2)C(=C(Br)C(=N1)C)C',
'002': 'N1(C2=NC(=CS2)C2=CC=C(Cl)C=C2)C(=C(F)C(=N1)C)C',
}
# all smiles are output from above input as either a repeat smiles or a fragment of them
self.test_dataset_golden_output_01 = {'CUT_TYPE,MOL_ID_L,MOL_ID_R,NATOMS,MCSS,FRAG_L,FRAG_R': None,
'SINGLE,1,2,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1BrH],[1FH]': None,
'SINGLE,2,1,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1FH],[1BrH]': None,
'DOUBLE,2,3,14,[1ClH].Fc1c([n](c2sc[2cH][n]2)[n]c1C)C,[1cH]1cc[2cH]cc1,[n]1[1cH]cc[2cH]c1': None,
'DOUBLE,3,2,14,[1ClH].Fc1c([n](c2sc[2cH][n]2)[n]c1C)C,[n]1[1cH]cc[2cH]c1,[1cH]1cc[2cH]cc1': None,
'SINGLE,1341352,4,11,O=C(NCc1ccccc1)[1CH3],Cc1[1nH][n]c(C(F)(F)F)c1,[1nH]1[n]ccc1': None,
'SINGLE,4,1341352,11,O=C(NCc1ccccc1)[1CH3],[1nH]1[n]ccc1,Cc1[1nH][n]c(C(F)(F)F)c1': None,
'DOUBLE,6211,6232,40,[1CH4].[2CH3]C(=O)OCC1N(C(=O)c2cc(c(OC)c(c2)OC)OC)CCN(C1)C(=O)c1cc(c(OC)c(OC)c1)OC,[1CH3]CCC[2CH3],C[12CH2]C': None,
'DOUBLE,6232,6211,40,[1CH4].[2CH3]C(=O)OCC1N(C(=O)c2cc(c(OC)c(c2)OC)OC)CCN(C1)C(=O)c1cc(c(OC)c(OC)c1)OC,C[12CH2]C,[1CH3]CCC[2CH3]': None}
self.test_dataset_golden_output_02 = {'CUT_TYPE,MOL_ID_L,MOL_ID_R,NATOMS,MCSS,FRAG_L,FRAG_R': None,
'SINGLE,1,2,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1BrH],[1FH]': None,
'SINGLE,2,1,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1FH],[1BrH]': None,
'SINGLE,2,3,13,Fc1c([n](c2sc[1cH][n]2)[n]c1C)C,Clc1cc[1cH]cc1,Clc1[n]c[1cH]cc1': None,
'SINGLE,3,2,13,Fc1c([n](c2sc[1cH][n]2)[n]c1C)C,Clc1[n]c[1cH]cc1,Clc1cc[1cH]cc1': None,
'SINGLE,1341352,4,11,O=C(NCc1ccccc1)[1CH3],Cc1[1nH][n]c(C(F)(F)F)c1,[1nH]1[n]ccc1': None,
'SINGLE,4,1341352,11,O=C(NCc1ccccc1)[1CH3],[1nH]1[n]ccc1,Cc1[1nH][n]c(C(F)(F)F)c1': None,
'SINGLE,6211,6232,39,[1CH3]C(=O)OCC1N(C(=O)c2cc(c(OC)c(c2)OC)OC)CCN(C1)C(=O)c1cc(c(OC)c(OC)c1)OC,[1CH3]CCCCC,C[1CH](C)C': None,
'SINGLE,6232,6211,39,[1CH3]C(=O)OCC1N(C(=O)c2cc(c(OC)c(c2)OC)OC)CCN(C1)C(=O)c1cc(c(OC)c(OC)c1)OC,C[1CH](C)C,[1CH3]CCCCC': None}
self.test_dataset_golden_output_03 = {'CUT_TYPE,MOL_ID_L,MOL_ID_R,NATOMS,MCSS,FRAG_L,FRAG_R': None,
'SINGLE,1,2,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1BrH],[1FH]': None,
'SINGLE,2,1,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1FH],[1BrH]': None,
'DOUBLE,1,2,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1BrH],[1FH]': None,
'DOUBLE,2,1,19,Clc1ccc(c2csc([n]3[n]c([1cH]c3C)C)[n]2)cc1,[1FH],[1BrH]': None}
# write test data to temp file (smi)
for smi_id, smi in list(self.test_dataset_goldeninput_smi_01.items()):
self.temp_file_input_smi_01.write(smi + " " + smi_id + "\n")
self.temp_file_input_smi_01.close()
# write test data to temp file (smi)
for smi_id, smi in list(self.test_dataset_goldeninput_smi_03.items()):
self.temp_file_input_smi_03.write(smi + " " + smi_id + "\n")
self.temp_file_input_smi_03.close()
# container for results data
self.test_dataset_testresults = {}
def tearDown(self):
"""Tear down object for clean reuse in further tests"""
# clean out the object
self.test_mmp_mcss_object.clean_out_data()
# clean out the temp data store
self.test_dataset_testresults.clear()
os.remove(self.temp_file_input_smi_01.name)
def test_get_largest_mcs_pairs_with_diff(self):
"""Test method to get largest MCS MMP for given smi - smi pair"""
# 6. full build then write of pairs to file, but only for a single named column
self.test_mmp_mcss_object.build_from_dicer(self.temp_file_input_smi_01.name, 'BOTH', 'NONE')
self.test_mmp_mcss_object.enumerate_fragment_properties()
self.test_mmp_mcss_object.get_largest_mcs_pairs(self.temp_file_output_pairs.name, 'BOTH')
# now read it back into temp object and check it's what we wrote out!
test_results_filehandle = open(self.temp_file_output_pairs.name, 'r')
for line in test_results_filehandle:
line = line.rstrip('\r')
line = line.rstrip('\n')
self.test_dataset_testresults[line] = None
test_results_filehandle.close()
#print(self.test_dataset_testresults)
self.assertEqual(self.test_dataset_golden_output_01, self.test_dataset_testresults)
def test_get_largest_mcs_pairs_mdc_atm_hard(self):
"""Test method to get largest MCS MMP for given smi - smi pair"""
# 6. full build then write of pairs to file, but only for a single named column
self.test_mmp_mcss_object.build_from_dicer(self.temp_file_input_smi_01.name, 'BOTH', 'NONE')
self.test_mmp_mcss_object.enumerate_fragment_properties()
self.test_mmp_mcss_object.get_largest_mcs_pairs(self.temp_file_output_pairs.name, 'BOTH', mdc_atm_hard=4)
# now read it back into temp object and check it's what we wrote out!
test_results_filehandle = open(self.temp_file_output_pairs.name, 'r')
for line in test_results_filehandle:
line = line.rstrip('\r')
line = line.rstrip('\n')
self.test_dataset_testresults[line] = None
test_results_filehandle.close()
#print(self.test_dataset_testresults)
self.assertEqual(self.test_dataset_golden_output_02, self.test_dataset_testresults)
def test_get_largest_mcs_pairs_mdc_atm_soft(self):
"""Test method to get largest MCS MMP for given smi - smi pair"""
# 6. full build then write of pairs to file, but only for a single named column
self.test_mmp_mcss_object.build_from_dicer(self.temp_file_input_smi_03.name, 'BOTH', 'NONE')
self.test_mmp_mcss_object.enumerate_fragment_properties()
#
self.test_mmp_mcss_object.get_largest_mcs_pairs(self.temp_file_output_pairs.name, 'BOTH')
# now read it back into temp object and check it's what we wrote out!
test_results_filehandle = open(self.temp_file_output_pairs.name, 'r')
for line in test_results_filehandle:
line = line.rstrip('\r')
line = line.rstrip('\n')
self.test_dataset_testresults[line] = None
test_results_filehandle.close()
self.test_mmp_mcss_object.get_largest_mcs_pairs(self.temp_file_output_pairs.name, 'BOTH', mdc_atm_soft=3,
mdc_atm_soft_threshold=4)
# now read it back into temp object and check it's what we wrote out!
test_results_filehandle = open(self.temp_file_output_pairs.name, 'r')
for line in test_results_filehandle:
line = line.rstrip('\r')
line = line.rstrip('\n')
self.test_dataset_testresults[line] = None
test_results_filehandle.close()
#print(self.test_dataset_testresults)
self.assertEqual(self.test_dataset_golden_output_03, self.test_dataset_testresults)
if __name__ == '__main__':
unittest.main()
|
import os
import json
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
class Action:
def __init__(self, card, config):
config.pop("action", None)
self.card = card
self.config = config
def env_vars_for_object(self, config, prefix):
env_vars = {}
config.pop("action", None)
config.pop("id", None)
for key, value in config.items():
if value and isinstance(value, dict):
nested_env_vars = self.env_vars_for_object(
value, "{}_{}".format(prefix, key.upper())
)
env_vars = {**env_vars, **nested_env_vars}
else:
env_vars["{}_{}".format(prefix, key.upper())] = value
return env_vars
def env_vars(self):
with open(CURRENT_DIR + "/../../config/config.json", "r") as f:
global_config = json.load(f)
env_vars = self.env_vars_for_object(self.card, "CARD")
env_vars["magic_cards_room"] = global_config["room"]
prefix = self.__class__.__name__.replace("Action", "").upper()
return {**env_vars, **self.env_vars_for_object(self.config, prefix)}
class ChromecastAction(Action):
def __init__(self, card, config, chromecast):
super().__init__(card, config)
self.chromecast = chromecast
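# Example of the flattening performed by Action.env_vars_for_object
# (illustrative; the config values below are made up):
#
#   config = {"id": 1, "action": "play", "media": {"url": "http://x", "volume": 5}}
#   Action(card={}, config={}).env_vars_for_object(config, "PLAY")
#   # -> {"PLAY_MEDIA_URL": "http://x", "PLAY_MEDIA_VOLUME": 5}
#   # "action" and "id" are stripped at every level and nested dicts are
#   # expanded with an underscore-joined, upper-cased prefix.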
|
from office365.runtime.client_value_collection import ClientValueCollection
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.resource_path import ResourcePath
from office365.sharepoint.base_entity import BaseEntity
from office365.sharepoint.tenant.administration.hubSiteProperties import HubSiteProperties
from office365.sharepoint.tenant.administration.secondary_administrators_fields_data import \
SecondaryAdministratorsFieldsData
from office365.sharepoint.tenant.administration.secondary_administrators_info import SecondaryAdministratorsInfo
from office365.sharepoint.tenant.administration.site_properties import SiteProperties
from office365.sharepoint.tenant.administration.site_properties_collection import SitePropertiesCollection
from office365.sharepoint.tenant.administration.sitePropertiesEnumerableFilter import SitePropertiesEnumerableFilter
from office365.sharepoint.tenant.administration.spo_operation import SpoOperation
class Tenant(BaseEntity):
def __init__(self, context):
super().__init__(context, ResourcePath("Microsoft.Online.SharePoint.TenantAdministration.Tenant"),
"Microsoft.Online.SharePoint.TenantAdministration")
def get_site_secondary_administrators(self, site_id):
"""
Gets site collection administrators
:type site_id: str
"""
return_type = ClientValueCollection(SecondaryAdministratorsInfo)
payload = SecondaryAdministratorsFieldsData(site_id)
qry = ServiceOperationQuery(self, "GetSiteSecondaryAdministrators", None, payload,
"secondaryAdministratorsFieldsData", return_type)
self.context.add_query(qry)
return return_type
def set_site_secondary_administrators(self, site_id, emails, names=None):
"""
Sets site collection administrators
:type names: list[str] or None
:type emails: list[str]
:type site_id: str
"""
payload = SecondaryAdministratorsFieldsData(site_id, emails, names)
qry = ServiceOperationQuery(self, "SetSiteSecondaryAdministrators", None, payload,
"secondaryAdministratorsFieldsData", None)
self.context.add_query(qry)
return self
def register_hub_site(self, site_url):
"""
Registers an existing site as a hub site.
:param str site_url:
:return:
"""
return_type = HubSiteProperties(self.context)
params = {"siteUrl": site_url}
qry = ServiceOperationQuery(self, "RegisterHubSite", None, params, None, return_type)
self.context.add_query(qry)
return return_type
def unregister_hub_site(self, siteUrl):
"""
Unregisters a hub site so that it is no longer a hub site.
:param str siteUrl:
:return:
"""
params = {"siteUrl": siteUrl}
qry = ServiceOperationQuery(self, "UnregisterHubSite", None, params, None, None)
self.context.add_query(qry)
return self
def create_site(self, site_create_props):
"""Queues a site collection for creation with the specified properties.
:param SiteCreationProperties site_create_props:
A SiteCreationProperties object that contains the initial properties
of the new site collection.
"""
result = SpoOperation(self.context)
qry = ServiceOperationQuery(self, "CreateSite", None, site_create_props, "siteCreationProperties", result)
self.context.add_query(qry)
return result
def remove_site(self, site_url):
"""Deletes the site with the specified URL
:param str site_url: A string representing the URL of the site.
"""
result = SpoOperation(self.context)
qry = ServiceOperationQuery(self, "removeSite", [site_url], None, None, result)
self.context.add_query(qry)
return result
    def remove_deleted_site(self, site_url):
        """Permanently removes the deleted site with the specified URL. Not implemented in this class."""
        pass
    def restore_deleted_site(self, site_url):
        """Restores the deleted site with the specified URL. Not implemented in this class."""
        pass
def get_site_properties_by_url(self, url, include_detail):
"""
:param str url: A string that represents the site URL.
:param bool include_detail: A Boolean value that indicates whether to include all of the SPSite properties.
"""
site_props = SiteProperties(self.context)
self._sites.add_child(site_props)
payload = {
'url': url,
'includeDetail': include_detail
}
qry = ServiceOperationQuery(self, "getSitePropertiesByUrl", None, payload, None, site_props)
self.context.add_query(qry)
return site_props
def get_site_properties_from_sharepoint_by_filters(self, _filter, start_index=0, include_detail=False):
"""
:param bool include_detail:
:param int start_index:
:param str _filter:
"""
site_props_col = SitePropertiesCollection(self.context)
qry = ServiceOperationQuery(self, "getSitePropertiesFromSharePointByFilters",
None,
SitePropertiesEnumerableFilter(_filter, start_index, include_detail),
"speFilter",
site_props_col)
self.context.add_query(qry)
return site_props_col
@property
def root_site_url(self):
"""
:rtype: str or None
"""
return self.properties.get('RootSiteUrl', None)
@property
def _sites(self):
"""Gets a collection of sites."""
if self.is_property_available('sites'):
return self.properties['sites']
else:
return SitePropertiesCollection(self.context, ResourcePath("sites", self.resource_path))
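# Usage sketch (illustrative; assumes an admin-site ClientContext from
# office365.sharepoint.client_context and already-obtained credentials):
#
#   from office365.sharepoint.client_context import ClientContext
#
#   admin_ctx = ClientContext("https://contoso-admin.sharepoint.com").with_credentials(credentials)
#   tenant = Tenant(admin_ctx)
#   props = tenant.get_site_properties_by_url("https://contoso.sharepoint.com/sites/team", True)
#   admin_ctx.execute_query()
#   print(props.properties.get("Title"))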
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
__version__ = '1.18.1333'
# -----------------------------------------------------------------------------
import asyncio
import concurrent
import socket
import time
import math
import random
import certifi
import aiohttp
import ssl
import sys
import yarl
# -----------------------------------------------------------------------------
from ccxt.async_support.base.throttle import throttle
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
# -----------------------------------------------------------------------------
from ccxt.base.exchange import Exchange as BaseExchange
# -----------------------------------------------------------------------------
__all__ = [
'BaseExchange',
'Exchange',
]
# -----------------------------------------------------------------------------
class Exchange(BaseExchange):
def __init__(self, config={}):
if 'asyncio_loop' in config:
self.asyncio_loop = config['asyncio_loop']
self.asyncio_loop = self.asyncio_loop or asyncio.get_event_loop()
self.aiohttp_trust_env = config.get('aiohttp_trust_env', self.aiohttp_trust_env)
self.verify = config.get('verify', self.verify)
self.own_session = 'session' not in config
self.cafile = config.get('cafile', certifi.where())
self.open()
super(Exchange, self).__init__(config)
self.init_rest_rate_limiter()
def init_rest_rate_limiter(self):
self.throttle = throttle(self.extend({
'loop': self.asyncio_loop,
}, self.tokenBucket))
def __del__(self):
if self.session is not None:
self.logger.warning(self.id + " requires to release all resources with an explicit call to the .close() coroutine. If you are using the exchange instance with async coroutines, add exchange.close() to your code into a place when you're done with the exchange and don't need the exchange instance anymore (at the end of your async coroutine).")
if sys.version_info >= (3, 5):
async def __aenter__(self):
self.open()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
def open(self):
if self.own_session and self.session is None:
# Create our SSL context object with our CA cert file
context = ssl.create_default_context(cafile=self.cafile) if self.verify else self.verify
# Pass this SSL context to aiohttp and create a TCPConnector
connector = aiohttp.TCPConnector(ssl=context, loop=self.asyncio_loop)
self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector, trust_env=self.aiohttp_trust_env)
async def close(self):
if self.session is not None:
if self.own_session:
await self.session.close()
self.session = None
async def wait_for_token(self):
while self.rateLimitTokens <= 1:
# if self.verbose:
# print('Waiting for tokens: Exchange: {0}'.format(self.id))
self.add_new_tokens()
seconds_delays = [0.001, 0.005, 0.022, 0.106, 0.5]
delay = random.choice(seconds_delays)
await asyncio.sleep(delay)
self.rateLimitTokens -= 1
def add_new_tokens(self):
# if self.verbose:
# print('Adding new tokens: Exchange: {0}'.format(self.id))
now = time.monotonic()
time_since_update = now - self.rateLimitUpdateTime
new_tokens = math.floor((0.8 * 1000.0 * time_since_update) / self.rateLimit)
if new_tokens > 1:
self.rateLimitTokens = min(self.rateLimitTokens + new_tokens, self.rateLimitMaxTokens)
self.rateLimitUpdateTime = now
async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
await self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
async def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
request_headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
print("\nRequest:", method, url, headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
request_body = body
encoded_body = body.encode() if body else None
session_method = getattr(self.session, method.lower())
http_response = None
http_status_code = None
http_status_text = None
json_response = None
try:
async with session_method(yarl.URL(url, encoded=True),
data=encoded_body,
headers=request_headers,
timeout=(self.timeout / 1000),
proxy=self.aiohttp_proxy) as response:
http_response = await response.text()
http_status_code = response.status
http_status_text = response.reason
json_response = self.parse_json(http_response)
headers = response.headers
if self.enableLastHttpResponse:
self.last_http_response = http_response
if self.enableLastResponseHeaders:
self.last_response_headers = headers
if self.enableLastJsonResponse:
self.last_json_response = json_response
if self.verbose:
print("\nResponse:", method, url, http_status_code, headers, http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
except socket.gaierror as e:
raise ExchangeNotAvailable(method + ' ' + url)
except concurrent.futures._base.TimeoutError as e:
raise RequestTimeout(method + ' ' + url)
except aiohttp.client_exceptions.ClientConnectionError as e:
raise ExchangeNotAvailable(method + ' ' + url)
except aiohttp.client_exceptions.ClientError as e: # base exception class
raise ExchangeError(method + ' ' + url)
self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
self.handle_rest_errors(http_status_code, http_status_text, http_response, url, method)
self.handle_rest_response(http_response, json_response, url, method)
if json_response is not None:
return json_response
return http_response
async def load_markets(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = await self.fetch_currencies()
markets = await self.fetch_markets(params)
return self.set_markets(markets, currencies)
async def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = await self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = await self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
async def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, await self.fetch_fees())
return self.loaded_fees
async def fetch_markets(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.to_array(self.markets)
async def fetch_currencies(self, params={}):
# markets are returned as a list
# currencies are returned as a dict
# this is for historical reasons
# and may be changed for consistency later
return self.currencies
async def fetch_status(self, params={}):
if self.has['fetchTime']:
updated = await self.fetch_time(params)
self.status['updated'] = updated
return self.status
async def fetch_order_status(self, id, symbol=None, params={}):
order = await self.fetch_order(id, symbol, params)
return order['status']
async def fetch_partial_balance(self, part, params={}):
balance = await self.fetch_balance(params)
return balance[part]
async def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = await self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
async def perform_order_book_request(self, market, limit=None, params={}):
raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.perform_order_book_request(market, limit, params)
return self.parse_order_book(orderbook, market, limit, params)
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
raise NotSupported('fetch_ohlcv() not implemented yet')
await self.load_markets()
trades = await self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcv(trades, timeframe, since, limit)
async def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
return await self.fetch_ohlcv(symbol, timeframe, since, limit, params)
async def fetch_full_tickers(self, symbols=None, params={}):
return await self.fetch_tickers(symbols, params)
async def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
raise ExchangeError('updateOrder() requires enableRateLimit = true')
await self.cancel_order(id, symbol)
return await self.create_order(symbol, *args)
async def fetch_trading_fees(self, params={}):
raise NotSupported('fetch_trading_fees() not supported yet')
async def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
raise NotSupported('fetch_trading_fee() not supported yet')
return await self.fetch_trading_fees(params)
async def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = await self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
async def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = await self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
async def fetch_ticker(self, symbol, params={}):
raise NotSupported('fetch_ticker() not supported yet')
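# Usage sketch (illustrative; assumes a concrete exchange subclass such as
# ccxt.async_support.binance, which is not defined in this module):
#
#   import asyncio
#   import ccxt.async_support as ccxt
#
#   async def main():
#       exchange = ccxt.binance({'enableRateLimit': True})
#       try:
#           await exchange.load_markets()
#           print(await exchange.fetch_ticker('BTC/USDT'))
#       finally:
#           await exchange.close()
#
#   asyncio.run(main())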
|
import csv
def ClassFactory(class_name, dictionary):
return type(class_name, (object,), dictionary)
class CsvReader:
    def __init__(self, filepath):
        # read every row of the CSV into a list of dicts keyed by the header row
        self.data = []
        with open(filepath) as csv_file:
            csv_data = csv.DictReader(csv_file, delimiter=',')
            for row in csv_data:
                self.data.append(row)
    def return_data_object(self, class_name):
        # build one dynamically created class per row; the row's columns become attributes
        objects = []
        for row in self.data:
            objects.append(ClassFactory(class_name, row))
        return objects
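# Usage sketch (illustrative; "people.csv" and the attribute names are assumptions):
#
#   reader = CsvReader("people.csv")
#   people = reader.return_data_object("Person")
#   # each entry is a dynamically created class whose attributes are the CSV
#   # columns of one row, e.g. people[0].name if the file has a "name" column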
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
import mock
from dashboard.pinpoint.models.quest import read_value
from tracing.value import histogram_set
from tracing.value import histogram as histogram_module
from tracing.value.diagnostics import generic_set
from tracing.value.diagnostics import reserved_infos
_BASE_ARGUMENTS_HISTOGRAMS = {'benchmark': 'speedometer'}
_BASE_ARGUMENTS_GRAPH_JSON = {
'chart': 'chart_name',
'trace': 'trace_name',
}
class ReadHistogramsJsonValueQuestTest(unittest.TestCase):
def testMinimumArguments(self):
quest = read_value.ReadHistogramsJsonValue.FromDict(
_BASE_ARGUMENTS_HISTOGRAMS)
expected = read_value.ReadHistogramsJsonValue('chartjson-output.json')
self.assertEqual(quest, expected)
def testAllArguments(self):
arguments = dict(_BASE_ARGUMENTS_HISTOGRAMS)
arguments['chart'] = 'timeToFirst'
arguments['tir_label'] = 'pcv1-cold'
arguments['trace'] = 'trace_name'
arguments['statistic'] = 'avg'
quest = read_value.ReadHistogramsJsonValue.FromDict(arguments)
expected = read_value.ReadHistogramsJsonValue(
'chartjson-output.json', 'timeToFirst',
'pcv1-cold', 'trace_name', 'avg')
self.assertEqual(quest, expected)
def testPerformanceTestSuite(self):
arguments = dict(_BASE_ARGUMENTS_HISTOGRAMS)
arguments['target'] = 'performance_test_suite'
quest = read_value.ReadHistogramsJsonValue.FromDict(arguments)
expected = read_value.ReadHistogramsJsonValue(
'speedometer/perf_results.json')
self.assertEqual(quest, expected)
def testPerformanceTestSuiteWindows(self):
arguments = dict(_BASE_ARGUMENTS_HISTOGRAMS)
arguments['dimensions'] = [{'key': 'os', 'value': 'Windows-10'}]
arguments['target'] = 'performance_test_suite'
quest = read_value.ReadHistogramsJsonValue.FromDict(arguments)
expected = read_value.ReadHistogramsJsonValue(
'speedometer\\perf_results.json')
self.assertEqual(quest, expected)
class ReadGraphJsonValueQuestTest(unittest.TestCase):
def testAllArguments(self):
quest = read_value.ReadGraphJsonValue.FromDict(_BASE_ARGUMENTS_GRAPH_JSON)
expected = read_value.ReadGraphJsonValue('chart_name', 'trace_name')
self.assertEqual(quest, expected)
def testMissingChart(self):
arguments = dict(_BASE_ARGUMENTS_GRAPH_JSON)
del arguments['chart']
with self.assertRaises(TypeError):
read_value.ReadGraphJsonValue.FromDict(arguments)
def testMissingTrace(self):
arguments = dict(_BASE_ARGUMENTS_GRAPH_JSON)
del arguments['trace']
with self.assertRaises(TypeError):
read_value.ReadGraphJsonValue.FromDict(arguments)
class _ReadValueExecutionTest(unittest.TestCase):
def setUp(self):
patcher = mock.patch('dashboard.services.isolate.Retrieve')
self._retrieve = patcher.start()
self.addCleanup(patcher.stop)
def SetOutputFileContents(self, contents):
self._retrieve.side_effect = (
'{"files": {"chartjson-output.json": {"h": "output json hash"}}}',
json.dumps(contents),
)
def assertReadValueError(self, execution):
self.assertTrue(execution.completed)
self.assertTrue(execution.failed)
self.assertIsInstance(execution.exception, basestring)
last_exception_line = execution.exception.splitlines()[-1]
self.assertTrue(last_exception_line.startswith('ReadValueError'))
def assertReadValueSuccess(self, execution):
self.assertTrue(execution.completed)
self.assertFalse(execution.failed)
self.assertEqual(execution.result_arguments, {})
def assertRetrievedOutputJson(self):
expected_calls = [
mock.call('server', 'output hash'),
mock.call('server', 'output json hash'),
]
self.assertEqual(self._retrieve.mock_calls, expected_calls)
class ReadHistogramsJsonValueTest(_ReadValueExecutionTest):
def testReadHistogramsJsonValue(self):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnostic(
reserved_infos.STORY_TAGS.name,
generic_set.GenericSet(['group:tir_label']))
histograms.AddSharedDiagnostic(
reserved_infos.STORIES.name,
generic_set.GenericSet(['story']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
'chartjson-output.json', hist.name, 'tir_label', 'story')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (0, 1, 2))
self.assertRetrievedOutputJson()
def testReadHistogramsJsonValueStatistic(self):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnostic(
reserved_infos.STORY_TAGS.name,
generic_set.GenericSet(['group:tir_label']))
histograms.AddSharedDiagnostic(
reserved_infos.STORIES.name,
generic_set.GenericSet(['story']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
'chartjson-output.json', hist.name,
'tir_label', 'story', statistic='avg')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (1,))
self.assertRetrievedOutputJson()
def testReadHistogramsJsonValueStatisticNoSamples(self):
hist = histogram_module.Histogram('hist', 'count')
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnostic(
reserved_infos.STORY_TAGS.name,
generic_set.GenericSet(['group:tir_label']))
histograms.AddSharedDiagnostic(
reserved_infos.STORIES.name,
generic_set.GenericSet(['story']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
'chartjson-output.json', hist.name,
'tir_label', 'story', statistic='avg')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution)
def testReadHistogramsJsonValueMultipleHistograms(self):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
hist2 = histogram_module.Histogram('hist', 'count')
hist2.AddSample(0)
hist2.AddSample(1)
hist2.AddSample(2)
hist3 = histogram_module.Histogram('some_other_histogram', 'count')
hist3.AddSample(3)
hist3.AddSample(4)
hist3.AddSample(5)
histograms = histogram_set.HistogramSet([hist, hist2, hist3])
histograms.AddSharedDiagnostic(
reserved_infos.STORY_TAGS.name,
generic_set.GenericSet(['group:tir_label']))
histograms.AddSharedDiagnostic(
reserved_infos.STORIES.name,
generic_set.GenericSet(['story']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
'chartjson-output.json', hist.name, 'tir_label', 'story')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (0, 1, 2, 0, 1, 2))
self.assertRetrievedOutputJson()
def testReadHistogramsTraceUrls(self):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.diagnostics[reserved_infos.TRACE_URLS.name] = (
generic_set.GenericSet(['trace_url1', 'trace_url2']))
hist2 = histogram_module.Histogram('hist2', 'count')
hist2.diagnostics[reserved_infos.TRACE_URLS.name] = (
generic_set.GenericSet(['trace_url3']))
hist3 = histogram_module.Histogram('hist3', 'count')
hist3.diagnostics[reserved_infos.TRACE_URLS.name] = (
generic_set.GenericSet(['trace_url2']))
histograms = histogram_set.HistogramSet([hist, hist2, hist3])
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
'chartjson-output.json', hist_name=hist.name)
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (0,))
self.assertEqual(
{
'completed': True,
'exception': None,
'details': [
{
'key': 'trace',
'value': 'trace_url1',
'url': 'trace_url1',
},
{
'key': 'trace',
'value': 'trace_url2',
'url': 'trace_url2',
},
{
'key': 'trace',
'value': 'trace_url3',
'url': 'trace_url3',
},
],
},
execution.AsDict())
self.assertRetrievedOutputJson()
def testReadHistogramsDiagnosticRefSkipTraceUrls(self):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.diagnostics[reserved_infos.TRACE_URLS.name] = (
generic_set.GenericSet(['trace_url1', 'trace_url2']))
hist2 = histogram_module.Histogram('hist2', 'count')
hist2.diagnostics[reserved_infos.TRACE_URLS.name] = (
generic_set.GenericSet(['trace_url3']))
hist2.diagnostics[reserved_infos.TRACE_URLS.name].guid = 'foo'
histograms = histogram_set.HistogramSet([hist, hist2])
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
'chartjson-output.json', hist_name=hist.name)
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (0,))
self.assertEqual(
{
'completed': True,
'exception': None,
'details': [
{
'key': 'trace',
'value': 'trace_url1',
'url': 'trace_url1',
},
{
'key': 'trace',
'value': 'trace_url2',
'url': 'trace_url2',
},
],
},
execution.AsDict())
self.assertRetrievedOutputJson()
def testReadHistogramsJsonValueWithNoTirLabel(self):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnostic(
reserved_infos.STORY_TAGS.name,
generic_set.GenericSet(['group:tir_label']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
'chartjson-output.json', hist_name=hist.name, tir_label='tir_label')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (0, 1, 2))
self.assertRetrievedOutputJson()
def testReadHistogramsJsonValueWithNoStory(self):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
histograms = histogram_set.HistogramSet([hist])
histograms.AddSharedDiagnostic(
reserved_infos.STORIES.name,
generic_set.GenericSet(['story']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
'chartjson-output.json', hist_name=hist.name, story='story')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (0, 1, 2))
self.assertRetrievedOutputJson()
def testReadHistogramsJsonValueSummary(self):
samples = []
hists = []
for i in xrange(10):
hist = histogram_module.Histogram('hist', 'count')
hist.AddSample(0)
hist.AddSample(1)
hist.AddSample(2)
hist.diagnostics[reserved_infos.STORIES.name] = (
generic_set.GenericSet(['story%d' % i]))
hists.append(hist)
samples.extend(hist.sample_values)
histograms = histogram_set.HistogramSet(hists)
histograms.AddSharedDiagnostic(
reserved_infos.STORY_TAGS.name,
generic_set.GenericSet(['group:tir_label']))
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
'chartjson-output.json', hist_name=hists[0].name, tir_label='tir_label')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, tuple(samples))
self.assertRetrievedOutputJson()
def testReadHistogramsJsonValueWithMissingFile(self):
self._retrieve.return_value = '{"files": {}}'
quest = read_value.ReadHistogramsJsonValue(
'chartjson-output.json', hist_name='metric', tir_label='test')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution)
def testReadHistogramsJsonValueEmptyHistogramSet(self):
self.SetOutputFileContents([])
quest = read_value.ReadHistogramsJsonValue(
'chartjson-output.json', hist_name='metric', tir_label='test')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution)
def testReadHistogramsJsonValueWithMissingHistogram(self):
hist = histogram_module.Histogram('hist', 'count')
histograms = histogram_set.HistogramSet([hist])
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
'chartjson-output.json', hist_name='does_not_exist')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution)
def testReadHistogramsJsonValueWithNoValues(self):
hist = histogram_module.Histogram('hist', 'count')
histograms = histogram_set.HistogramSet([hist])
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
'chartjson-output.json', hist_name='chart')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution)
def testReadHistogramsJsonValueTirLabelWithNoValues(self):
hist = histogram_module.Histogram('hist', 'count')
histograms = histogram_set.HistogramSet([hist])
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
'chartjson-output.json', hist_name='chart', tir_label='tir_label')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution)
def testReadHistogramsJsonValueStoryWithNoValues(self):
hist = histogram_module.Histogram('hist', 'count')
histograms = histogram_set.HistogramSet([hist])
self.SetOutputFileContents(histograms.AsDicts())
quest = read_value.ReadHistogramsJsonValue(
'chartjson-output.json', hist_name='chart', story='story')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution)
class ReadGraphJsonValueTest(_ReadValueExecutionTest):
def testReadGraphJsonValue(self):
self.SetOutputFileContents(
{'chart': {'traces': {'trace': ['126444.869721', '0.0']}}})
quest = read_value.ReadGraphJsonValue('chart', 'trace')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueSuccess(execution)
self.assertEqual(execution.result_values, (126444.869721,))
self.assertRetrievedOutputJson()
def testReadGraphJsonValueWithMissingFile(self):
self._retrieve.return_value = '{"files": {}}'
quest = read_value.ReadGraphJsonValue('metric', 'test')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution)
def testReadGraphJsonValueWithMissingChart(self):
self.SetOutputFileContents({})
quest = read_value.ReadGraphJsonValue('metric', 'test')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution)
def testReadGraphJsonValueWithMissingTrace(self):
self.SetOutputFileContents({'chart': {'traces': {}}})
quest = read_value.ReadGraphJsonValue('metric', 'test')
execution = quest.Start(None, 'server', 'output hash')
execution.Poll()
self.assertReadValueError(execution)
|
"""Tests for fan platforms."""
import pytest
from homeassistant.components.fan import FanEntity
class BaseFan(FanEntity):
"""Implementation of the abstract FanEntity."""
def __init__(self):
"""Initialize the fan."""
def test_fanentity():
"""Test fan entity methods."""
fan = BaseFan()
assert fan.state == "off"
assert fan.preset_modes is None
assert fan.supported_features == 0
assert fan.percentage_step == 1
assert fan.speed_count == 100
assert fan.capability_attributes == {}
# Test set_speed not required
with pytest.raises(NotImplementedError):
fan.oscillate(True)
with pytest.raises(AttributeError):
fan.set_speed("low")
with pytest.raises(NotImplementedError):
fan.set_percentage(0)
with pytest.raises(NotImplementedError):
fan.set_preset_mode("auto")
with pytest.raises(NotImplementedError):
fan.turn_on()
with pytest.raises(NotImplementedError):
fan.turn_off()
async def test_async_fanentity(hass):
"""Test async fan entity methods."""
fan = BaseFan()
fan.hass = hass
assert fan.state == "off"
assert fan.preset_modes is None
assert fan.supported_features == 0
assert fan.percentage_step == 1
assert fan.speed_count == 100
assert fan.capability_attributes == {}
# Test set_speed not required
with pytest.raises(NotImplementedError):
await fan.async_oscillate(True)
with pytest.raises(AttributeError):
await fan.async_set_speed("low")
with pytest.raises(NotImplementedError):
await fan.async_set_percentage(0)
with pytest.raises(NotImplementedError):
await fan.async_set_preset_mode("auto")
with pytest.raises(NotImplementedError):
await fan.async_turn_on()
with pytest.raises(NotImplementedError):
await fan.async_turn_off()
with pytest.raises(NotImplementedError):
await fan.async_increase_speed()
with pytest.raises(NotImplementedError):
await fan.async_decrease_speed()
@pytest.mark.parametrize(
"attribute_name, attribute_value",
[
("current_direction", "forward"),
("oscillating", True),
("percentage", 50),
("preset_mode", "medium"),
("preset_modes", ["low", "medium", "high"]),
("speed_count", 50),
("supported_features", 1),
],
)
def test_fanentity_attributes(attribute_name, attribute_value):
"""Test fan entity attribute shorthand."""
fan = BaseFan()
setattr(fan, f"_attr_{attribute_name}", attribute_value)
assert getattr(fan, attribute_name) == attribute_value
|
from ynab_api import __version__
def test_version():
assert __version__ == '0.1.0'
|
from vkbottle_types import GroupTypes
from vkbottle_types.events import GroupEventType, UserEventType
from .api import (
ABCAPI,
API,
DEFAULT_REQUEST_VALIDATORS,
DEFAULT_RESPONSE_VALIDATORS,
ABCRequestRescheduler,
ABCRequestValidator,
ABCResponseValidator,
ABCTokenGenerator,
BlockingRequestRescheduler,
ConsistentTokenGenerator,
SingleTokenGenerator,
Token,
get_token_generator,
)
from .dispatch import (
ABCDispenseView,
ABCHandler,
ABCRouter,
ABCRule,
ABCStateDispenser,
ABCView,
AndRule,
BaseMiddleware,
BaseReturnManager,
BaseStateGroup,
BuiltinStateDispenser,
MiddlewareError,
NotRule,
OrRule,
Router,
StatePeer,
)
from .exception_factory import (
ABCErrorHandler,
CaptchaError,
CodeException,
ErrorHandler,
VKAPIError,
swear,
)
from .framework import (
ABCBlueprint,
ABCFramework,
Bot,
BotBlueprint,
User,
UserBlueprint,
run_multibot,
)
from .http import ABCHTTPClient, AiohttpClient, SingleAiohttpClient
from .polling import ABCPolling, BotPolling, UserPolling
from .tools import (
EMPTY_KEYBOARD,
ABCAction,
ABCStorage,
ABCValidator,
AudioUploader,
AuthError,
BaseContext,
BaseUploader,
BotTypes,
CallableValidator,
Callback,
CtxStorage,
DelayedTask,
DocMessagesUploader,
DocUploader,
DocWallUploader,
EqualsValidator,
GraffitiUploader,
IsInstanceValidator,
Keyboard,
KeyboardButtonColor,
Location,
LoopWrapper,
OpenAppEvent,
OpenLink,
OpenLinkEvent,
PhotoChatFaviconUploader,
PhotoFaviconUploader,
PhotoMarketUploader,
PhotoMessageUploader,
PhotoToAlbumUploader,
PhotoUploader,
PhotoWallUploader,
ShowSnackbarEvent,
TemplateElement,
Text,
UserAuth,
UserTypes,
VideoUploader,
VKApps,
VKPay,
VoiceMessageUploader,
keyboard_gen,
load_blueprints_from_package,
run_in_task,
run_sync,
template_gen,
vkscript,
)
event_types = GroupTypes
__all__ = (
"ABCAction",
"ABCAPI",
"ABCBlueprint",
"ABCDispenseView",
"ABCErrorHandler",
"ABCFramework",
"ABCHandler",
"ABCHTTPClient",
"ABCPolling",
"ABCRequestRescheduler",
"ABCRequestValidator",
"ABCResponseValidator",
"ABCRouter",
"ABCRule",
"ABCStateDispenser",
"ABCStorage",
"ABCTokenGenerator",
"ABCValidator",
"ABCView",
"AiohttpClient",
"AndRule",
"API",
"AudioUploader",
"AuthError",
"BaseContext",
"BaseMiddleware",
"BaseReturnManager",
"BaseStateGroup",
"BaseUploader",
"BlockingRequestRescheduler",
"Bot",
"BotBlueprint",
"BotPolling",
"BotTypes",
"BuiltinStateDispenser",
"CallableValidator",
"Callback",
"CaptchaError",
"CodeException",
"ConsistentTokenGenerator",
"CtxStorage",
"DEFAULT_REQUEST_VALIDATORS",
"DEFAULT_RESPONSE_VALIDATORS",
"DelayedTask",
"DocMessagesUploader",
"DocUploader",
"DocWallUploader",
"EMPTY_KEYBOARD",
"EqualsValidator",
"ErrorHandler",
"get_token_generator",
"GraffitiUploader",
"GroupEventType",
"GroupTypes",
"IsInstanceValidator",
"keyboard_gen",
"Keyboard",
"KeyboardButtonColor",
"load_blueprints_from_package",
"Location",
"LoopWrapper",
"MiddlewareError",
"NotRule",
"OpenAppEvent",
"OpenLink",
"OpenLinkEvent",
"OrRule",
"PhotoChatFaviconUploader",
"PhotoFaviconUploader",
"PhotoMarketUploader",
"PhotoMessageUploader",
"PhotoToAlbumUploader",
"PhotoUploader",
"PhotoWallUploader",
"Router",
"run_in_task",
"run_multibot",
"run_sync",
"ShowSnackbarEvent",
"SingleAiohttpClient",
"SingleTokenGenerator",
"StatePeer",
"swear",
"template_gen",
"TemplateElement",
"Text",
"Token",
"User",
"UserAuth",
"UserBlueprint",
"UserEventType",
"UserPolling",
"UserTypes",
"VideoUploader",
"VKAPIError",
"VKApps",
"VKPay",
"vkscript",
"VoiceMessageUploader",
)
|
import cv2 as cv
# imread needs a file path; 'input.jpg' here is only a placeholder for illustration
image = cv.imread('input.jpg')
|
# -*- encoding: utf-8
from sqlalchemy.testing import eq_, engines
from sqlalchemy import *
from sqlalchemy import exc
from sqlalchemy.dialects.mssql import pyodbc, pymssql
from sqlalchemy.engine import url
from sqlalchemy.testing import fixtures
from sqlalchemy import testing
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing.mock import Mock
class ParseConnectTest(fixtures.TestBase):
def test_pyodbc_connect_dsn_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url('mssql://mydsn')
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;Trusted_Connection=Yes'], {}], connection)
def test_pyodbc_connect_old_style_dsn_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url('mssql:///?dsn=mydsn')
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;Trusted_Connection=Yes'], {}], connection)
def test_pyodbc_connect_dsn_non_trusted(self):
dialect = pyodbc.dialect()
u = url.make_url('mssql://username:password@mydsn')
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;UID=username;PWD=password'], {}], connection)
def test_pyodbc_connect_dsn_extra(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@mydsn/?LANGUAGE=us_'
'english&foo=bar')
connection = dialect.create_connect_args(u)
dsn_string = connection[0][0]
assert ";LANGUAGE=us_english" in dsn_string
assert ";foo=bar" in dsn_string
def test_pyodbc_connect(self):
dialect = pyodbc.dialect()
u = url.make_url('mssql://username:password@hostspec/database')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI'
'D=username;PWD=password'], {}], connection)
def test_pyodbc_connect_comma_port(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@hostspec:12345/data'
'base')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec,12345;Database=datab'
'ase;UID=username;PWD=password'], {}], connection)
def test_pyodbc_connect_config_port(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@hostspec/database?p'
'ort=12345')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI'
'D=username;PWD=password;port=12345'], {}], connection)
def test_pyodbc_extra_connect(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@hostspec/database?L'
'ANGUAGE=us_english&foo=bar')
connection = dialect.create_connect_args(u)
eq_(connection[1], {})
eq_(connection[0][0]
in ('DRIVER={SQL Server};Server=hostspec;Database=database;'
'UID=username;PWD=password;foo=bar;LANGUAGE=us_english',
'DRIVER={SQL Server};Server=hostspec;Database=database;UID='
'username;PWD=password;LANGUAGE=us_english;foo=bar'), True)
def test_pyodbc_odbc_connect(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql:///?odbc_connect=DRIVER%3D%7BSQL+Server'
'%7D%3BServer%3Dhostspec%3BDatabase%3Ddatabase'
'%3BUID%3Dusername%3BPWD%3Dpassword')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI'
'D=username;PWD=password'], {}], connection)
def test_pyodbc_odbc_connect_with_dsn(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql:///?odbc_connect=dsn%3Dmydsn%3BDatabase'
'%3Ddatabase%3BUID%3Dusername%3BPWD%3Dpassword'
)
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;Database=database;UID=username;PWD=password'],
{}], connection)
def test_pyodbc_odbc_connect_ignores_other_values(self):
dialect = pyodbc.dialect()
u = \
url.make_url('mssql://userdiff:passdiff@localhost/dbdiff?od'
'bc_connect=DRIVER%3D%7BSQL+Server%7D%3BServer'
'%3Dhostspec%3BDatabase%3Ddatabase%3BUID%3Duse'
'rname%3BPWD%3Dpassword')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI'
'D=username;PWD=password'], {}], connection)
def test_pymssql_port_setting(self):
dialect = pymssql.dialect()
u = \
url.make_url('mssql+pymssql://scott:tiger@somehost/test')
connection = dialect.create_connect_args(u)
eq_(
[[], {'host': 'somehost', 'password': 'tiger',
'user': 'scott', 'database': 'test'}], connection
)
u = \
url.make_url('mssql+pymssql://scott:tiger@somehost:5000/test')
connection = dialect.create_connect_args(u)
eq_(
[[], {'host': 'somehost:5000', 'password': 'tiger',
'user': 'scott', 'database': 'test'}], connection
)
def test_pymssql_disconnect(self):
dialect = pymssql.dialect()
for error in [
'Adaptive Server connection timed out',
'Net-Lib error during Connection reset by peer',
'message 20003',
'Error 10054',
'Not connected to any MS SQL server',
'Connection is closed'
]:
eq_(dialect.is_disconnect(error, None, None), True)
eq_(dialect.is_disconnect("not an error", None, None), False)
@testing.only_on(['mssql+pyodbc', 'mssql+pymssql'],
"FreeTDS specific test")
def test_bad_freetds_warning(self):
engine = engines.testing_engine()
def _bad_version(connection):
return 95, 10, 255
engine.dialect._get_server_version_info = _bad_version
assert_raises_message(exc.SAWarning,
'Unrecognized server version info',
engine.connect)
class VersionDetectionTest(fixtures.TestBase):
def test_pymssql_version(self):
dialect = pymssql.MSDialect_pymssql()
for vers in [
"Microsoft SQL Server Blah - 11.0.9216.62",
"Microsoft SQL Server (XYZ) - 11.0.9216.62 \n"
"Jul 18 2014 22:00:21 \nCopyright (c) Microsoft Corporation",
"Microsoft SQL Azure (RTM) - 11.0.9216.62 \n"
"Jul 18 2014 22:00:21 \nCopyright (c) Microsoft Corporation"
]:
conn = Mock(scalar=Mock(return_value=vers))
eq_(
dialect._get_server_version_info(conn),
(11, 0, 9216, 62)
)
|
#!python
# coding=utf-8
import cfg, cmn, vqw
import cookielib, urllib, urllib2, sys, argparse, re, string
import simplem3u8 as sm3u8
import simplejson as json
from urllib2 import Request, urlopen, URLError
from random import random
cmnHandler = cmn.cmnHandler()
_url_re = re.compile(r"""
http(s)?://(\w+.)?wasd\.tv/
(?:
channel/(?P<channel_id>\d+)
)
(?:
/(?:videos/)?
(?P<video_id>\d+)
)?
""", re.VERBOSE)
class wasdAPIHandler:
def __init__(self):
self.baseurl = 'https://wasd.tv/api'
return None
def getURL(self, url):
request = urllib2.Request(url)
try:
response = urllib2.urlopen(request)
retData = response.read()
response.close()
return retData
except URLError, e:
print e
return None
def getApiURL(self, url):
# Request the anon token and get the cookies
authUrl = "%s/auth/anon-token" % (self.baseurl)
cookies = cookielib.CookieJar()
handlers = [
urllib2.HTTPHandler(),
urllib2.HTTPSHandler(),
urllib2.HTTPCookieProcessor(cookies)
]
opener = urllib2.build_opener(*handlers)
authRequest = urllib2.Request(authUrl)
opener.open(authRequest)
# Request the endpoint
request = urllib2.Request(url)
request.add_header('User-Agent', cmnHandler.spoofAs('CHROME'))
try:
response = opener.open(request)
retData = response.read()
response.close()
return retData
except URLError, e:
print e
return None
def call(self, endpoint, query = None):
url = "%s/%s" % (self.baseurl, endpoint)
if (query):
queryArgs = urllib.urlencode(query)
url = "%s/%s?%s" % (self.baseurl, endpoint, queryArgs)
return self.getApiURL(url)
def getVideoInfoByID(self, videoId):
endpoint = "media-containers/%s" % (videoId)
query = {
"media_container_status": "RUNNING",
"limit": 1,
"offset": 0,
"channel_id": videoId,
"media_container_type": "SINGLE,COOP"
}
responseData = self.call(endpoint, query)
if responseData:
return json.loads(responseData)
return None
def getStreamInfoByID(self, streamId):
endpoint = "media-containers"
query = {
"media_container_status": "RUNNING",
"limit": 1,
"offset": 0,
"channel_id": streamId,
"media_container_type": "SINGLE,COOP"
}
responseData = self.call(endpoint, query)
if responseData:
return json.loads(responseData)
return None
def getTopGames(self, page = 0, limit = 50):
endpoint = "games"
query = {
"limit": limit,
"offset": page
}
responseData = self.call(endpoint, query)
if responseData:
return json.loads(responseData)
return None
def searchByGameTitle(self, title, page = 0, limit = 50):
endpoint = "search/games"
query = {
"search_phrase": title,
"limit": limit,
"offset": page
}
responseData = self.call(endpoint, query)
if responseData:
return json.loads(responseData)
return None
def getTopStreamsByGameID(self, id, page = 0, limit = 50):
endpoint = "media-containers"
query = {
"media_container_status": "RUNNING",
"game_id": id,
"media_container_type": "SINGLE,COOP",
"order_direction": "DESC",
"order_type": "VIEWERS",
"limit": limit,
"offset": page
}
responseData = self.call(endpoint, query)
if responseData:
return json.loads(responseData)
return None
class helpersHandler:
def parseURL(self, url):
return _url_re.match(url).groupdict()
def getVideoType(self, url):
types = self.parseURL(url)
return {'channel': types['channel_id'], 'video': types['video_id']}
def getPrefferedVideoURL(self, data):
sm3u8Parser = sm3u8.parseHandler()
playlists = sm3u8Parser.parse(data)
for quality in vqw.wasdVQW:
for idx in playlists:
if (playlists[idx]):
streamQuality = playlists[idx]
if (streamQuality['resolution'].find(quality) >= 0):
return playlists[idx]['uri']
sys.exit()
return None
def clearUri(self, uri):
uriSplit = uri.split('#')
return uriSplit[0]
def main(argv):
wasdApi = wasdAPIHandler()
helpers = helpersHandler()
if len(argv) == 0:
print "No arguments given. Use wasd.py -h for more info.\nThe script must be used from the shell."
sys.exit()
# Parse the arguments
argParser = argparse.ArgumentParser(description=cmnHandler.getScriptDescription('wasd.tv'), epilog=cmnHandler.getScriptEpilog(),
formatter_class=argparse.RawDescriptionHelpFormatter)
argParser.add_argument('-u', '--url', action='store', dest='url', help='The video/channel url')
    argParser.add_argument('-q', '--quality', action='store', dest='quality', help='Set the preferred video quality. This is optional. If not set, or if it is not available, the default quality weight will be used.')
argParser.add_argument('-tg', '--top-games', action='store_true', default=False, dest='topgames', help='Get a list of the current Top Games with live streams available, based on the number of viewers')
argParser.add_argument('-sg', '--search-game', action='store', dest='searchgame', help='Search for available streams based on game title/id')
argParser.add_argument('-shh', '--silence', action='store_true', default=False, dest='silence', help='If this is set, the script will not output anything, except of errors.')
args = argParser.parse_args()
if (args.silence != True):
cmnHandler.showIntroText()
if (args.url):
videoType = helpers.getVideoType(args.url)
if (args.quality):
vqw.wasdVQW.insert(0, args.quality)
if (args.topgames):
gamesList = wasdApi.getTopGames()
print "%-10s\t %-50s\t %-10s\t %-10s" % ('Game ID', 'Game', 'Viewers', 'Streams')
print "%s" % ('-'*200)
for game in gamesList['result']:
print "%-10s\t %-50s\t %-10d\t %-10d" % (game['game_id'], cmnHandler.uniStrip(game['game_name']), game['viewers_count'], game['stream_count'])
sys.exit()
if (args.searchgame):
gameTitle = args.searchgame
gameId = 0
try:
if int(gameTitle) >= 1:
gameId = gameTitle
except ValueError:
gameData = wasdApi.searchByGameTitle(gameTitle)
if gameData['result']['count'] > 1:
gamesList = gameData['result']['rows']
print "Found more than one game with the title %s. Select the one you want by the Game ID"
print "%-10s\t %-50s\t %-10s\t %-10s" % ('Game ID', 'Game', 'Viewers', 'Streams')
print "%s" % ('-'*200)
for game in gamesList:
print "%-10s\t %-50s\t %-10d\t %-10d" % (game['game_id'], cmnHandler.uniStrip(game['game_name']), game['viewers_count'], game['stream_count'])
else:
gameId = gameData['result']['rows'][0]['game_id']
if gameId > 0:
gameStreams = wasdApi.getTopStreamsByGameID(gameId)
if gameStreams:
print "%-36s\t %-10s\t %s" % ('URL', 'Viewers', 'Title')
print "%s" % ('-'*200)
for stream in gameStreams['result']:
streamUrl = "https://wasd.tv/channel/%s" % (stream['channel_id'])
print "%-36s\t %-10d\t %s" % (streamUrl, stream['media_container_streams'][0]['stream_current_viewers'], cmnHandler.uniStrip(stream['media_container_name']))
else:
print "No streams found for the game: %s" % (gameTitle)
sys.exit()
if (videoType['channel'] or videoType['video']):
if videoType['video']:
streamId = videoType['video']
streams = wasdApi.getVideoInfoByID(streamId)
stream_result = streams['result']
else:
streamId = videoType['channel']
streams = wasdApi.getStreamInfoByID(streamId)
stream_result = streams['result'][0]
stream_media = stream_result['media_container_streams'][0]['stream_media']
if (stream_media):
m3u8Response = wasdApi.getURL(stream_media[0]['media_meta']['media_url'])
if (m3u8Response):
uri = helpers.getPrefferedVideoURL(m3u8Response)
uri = helpers.clearUri(uri)
if uri:
if cfg.verbose and (args.silence != True):
print "%s" % (uri)
if cfg.autoplay:
cmnHandler.videoAutoplay(uri, 'list')
else:
print "Not valid video found"
else:
print "There is no video available!"
sys.exit()
sys.exit()
if __name__ == "__main__":
main(sys.argv[1:])
|
# Import spacy
import spacy
# Instantiate the English model: nlp
nlp = spacy.load('en', tagger=False, parser=False, matcher=False)
# Create a new document: doc (`article` is assumed to hold the text loaded earlier in the exercise)
doc = nlp(article)
# Print all of the found entities and their labels
for ent in doc.ents:
print(ent.label_, ent.text)
""" <script.py> output:
ORG Uber
ORG Uber
ORG Apple
ORG Uber
ORG Uber
PERSON Travis Kalanick
ORG Uber
PERSON Tim Cook
ORG Apple
CARDINAL Millions
ORG Uber
GPE drivers’
LOC Silicon Valley’s
ORG Yahoo
PERSON Marissa Mayer
MONEY $186m """
|
import glob
import cv2
import os
def extract_frame(movie_files_dir, out_dir):
movie_files = glob.glob(movie_files_dir)
if not movie_files:
print('movie files are not found.')
return
    for movie_file in movie_files:
        ext = movie_file.split('.')[-1]
        if ext not in ('mp4', 'MP4'):
            print(f"can't extract this movie file: {movie_file}")
            continue
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        cap = cv2.VideoCapture(movie_file)
        if not cap.isOpened():
            print(f"can't extract this movie file: {movie_file}")
            continue
        # zero-pad frame numbers to the width of the total frame count
        digit = len(str(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))))
        base = os.path.splitext(os.path.basename(movie_file))[0]
        n = 0
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            cv2.imwrite(os.path.join(out_dir, f"{base}_{str(n).zfill(digit)}.jpg"), frame)
            n += 1
        cap.release()
    print(f'{len(movie_files)} movie files extracted')
if __name__ == '__main__':
movie_files_dir = 'movies/*.mp4'
out_dir = 'out/'
extract_frame(movie_files_dir, out_dir)
|
#!/usr/bin/env python
from ping360_sonar.sensor import Ping360
from numpy import pi, sqrt, tan, cos, sign
from brping import definitions
class SonarInterface:
samplePeriodTickDuration = 25e-9
firmwareMinTransmitDuration = 5
firmwareMaxTransmitDuration = 500
firmwareMaxSamples = 1200
firmwareMinSamplePeriod = 80
maxDurationRatio = 64e6
def __init__(self, port, baudrate, fallback_emulated):
self.angle = 0
try:
self.sonar = Ping360(port, baudrate)
if self.sonar.initialize():
return
except:
pass
if not fallback_emulated:
raise RuntimeError('Cannot initialize sonar')
print('Using emulated sonar')
self.sonar = None
def configureAngles(self, aperture_deg, step_deg, ensure_divisor):
# to gradians
target_half_aperture = int(aperture_deg*200/360+0.5)
best_half_aperture = target_half_aperture
self.angle_step = int(round(step_deg*400/360))
# ensure angle_step is a divisor of max-min in gradians, necessary for LaserScan messages
if ensure_divisor:
# look around step, allow increased aperture
target_step = self.angle_step
# not too far from requested aperture, as close as possible to requested step (impacts turn duration)
computeCost = lambda step,half_aperture: 1000 if half_aperture%step != 0 else abs(step-target_step) + abs(half_aperture-target_half_aperture)
best_cost = computeCost(self.angle_step, target_half_aperture)
if best_cost != 0:
for step in range(1, target_step*2):
for half_aperture in range(target_half_aperture, min(target_half_aperture+10, 200)+1):
cost = computeCost(step, half_aperture)
if cost < best_cost:
best_cost = cost
self.angle_step = step
best_half_aperture = half_aperture
self.angle_min = -best_half_aperture
self.angle_max = best_half_aperture
if self.angle_max == 200:
self.angle_max -= self.angle_step
if self.angle < self.angle_min or self.angle > self.angle_max or (self.angle-self.angle_min) % self.angle_step != 0:
self.angle = 0
@staticmethod
def grad2rad(grad):
return grad*pi/200
def angleMin(self):
return self.grad2rad(self.angle_min)
def angleMax(self):
return self.grad2rad(self.angle_max)
def angleStep(self):
return self.grad2rad(self.angle_step)
def currentAngle(self):
return self.grad2rad(self.angle)
def angleCount(self):
return (self.angle_max-self.angle_min)//self.angle_step
def angleIndex(self):
if self.angle_step > 0:
return (self.angle-self.angle_min)//self.angle_step
return (self.angle-self.angle_max)//self.angle_step
def rangeFrom(self, index):
return (index+1)*self.max_range/self.samples
def configureTransducer(self, gain, frequency, speed_of_sound, max_range):
self.gain = gain
self.frequency = frequency
self.samples = int(min(self.firmwareMaxSamples,2*max_range/(self.firmwareMinSamplePeriod*speed_of_sound*self.samplePeriodTickDuration)))
self.sample_period = int((2.*max_range)/
                                 (self.samples*speed_of_sound*self.samplePeriodTickDuration))
#* Per firmware engineer:
#* 1. Starting point is TxPulse in usec = ((one-way range in metres) * 8000) / (Velocity of sound in metres
#* per second)
#* 2. Then check that TxPulse is wide enough for currently selected sample interval in usec, i.e.,
#* if TxPulse < (2.5 * sample interval) then TxPulse = (2.5 * sample interval)
#* 3. Perform limit checking
#1
one_way_duration_us = (8000.*max_range)/speed_of_sound
# 2 (transmit duration is microseconds, sample_period_ns is nanoseconds)
sample_period_ns = self.sample_period * self.samplePeriodTickDuration
self.transmit_duration = max(2.5*sample_period_ns/1000, one_way_duration_us)
# 3 ensure bounds
if self.transmit_duration < self.firmwareMinTransmitDuration:
self.transmit_duration = self.firmwareMinTransmitDuration
else:
max_duration = min(self.firmwareMaxTransmitDuration, sample_period_ns*self.maxDurationRatio)
if self.transmit_duration > max_duration:
self.transmit_duration = max_duration
self.transmit_duration = int(self.transmit_duration)
def transmitDuration(self):
# microseconds to seconds
return self.transmit_duration/1e6
def updateAngle(self):
self.angle += self.angle_step
if self.angle_min == -200:
# full scan
end_turn = self.angle + self.angle_step > self.angle_max
if self.angle > self.angle_max:
self.angle = self.angle_min
return end_turn
# sector scan, check near end of sector
if self.angle + self.angle_step >= self.angle_max or self.angle + self.angle_step <= self.angle_min:
self.angle_step *= -1
return True
return False
def read(self):
# update angle before transmit
end_turn = self.updateAngle()
if self.sonar is not None:
print(f'transmit: {self.transmit_duration}')
self.sonar.control_transducer(
0, # reserved
self.gain,
self.angle,
self.transmit_duration,
self.sample_period,
self.frequency,
self.samples,
1,
0)
self.sonar.wait_message([definitions.PING360_DEVICE_DATA, definitions.COMMON_NACK], 4.0)
self.data = bytearray(self.sonar._data)
return (len(self.data) != 0, end_turn)
# emulated sonar
from random import randint
from time import sleep
self.data = [0 for _ in range(self.samples)]
scale = 5*abs((self.angle+400) % 400 - 200)
for i in range(self.samples):
if randint(self.samples,2*self.samples) < 1.1*i + scale:
self.data[i] = randint(220, 255)
# emulate transmit duration in microseconds
#sleep(self.transmit_duration/1000000)
return (True, end_turn)
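# Minimal usage sketch (hypothetical port and parameter values): configure once, then call
# read() in a loop; read() returns (data_received, end_of_turn).
#
#   sonar = SonarInterface('/dev/ttyUSB0', 115200, fallback_emulated=True)
#   sonar.configureAngles(aperture_deg=360, step_deg=1, ensure_divisor=True)
#   sonar.configureTransducer(gain=0, frequency=740, speed_of_sound=1500, max_range=20)
#   ok, end_turn = sonar.read()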
# handles an angular sector of the image
class Bound:
radius = 0
def __init__(self, x, tm, tM):
self.x = x
if type(tM) == int:
self.low = Bound.clamp(tm*x)
self.up = int(tM*sqrt(Bound.radius**2-x**2-1))
else:
self.low = Bound.clamp(x*tm)
self.up = Bound.clamp(x*tM)
if self.up**2 + x**2 > Bound.radius**2:
self.up = int(sign(self.up) * sqrt(Bound.radius**2-x**2-1))
if self.up < self.low:
self.low,self.up = self.up,self.low
    @staticmethod
def clamp(coord):
if coord < -Bound.radius+1:
return -Bound.radius+1
elif coord > Bound.radius-1:
return Bound.radius-1
return int(coord)
class Sector:
def __init__(self):
self.dr = None
def configure(self, samples, radius):
self.dr = radius/samples
Bound.radius = radius
def init(self, angle, step):
angle_min = angle-step/2
angle_max = angle+step/2
xmin, xmax,same_side = self.xLimits(angle_min, angle_max)
tm, tM = tan(angle_min), tan(angle_max)
self.bounds = []
if same_side:
# same side
if abs(tm) > abs(tM):
tm,tM = tM,tm
for x in range(xmin, xmax+1):
self.bounds.append(Bound(x,tm,tM))
else:
f = 1 if abs(angle-pi/2) < abs(angle+pi/2) else -1
if f == -1:
tm,tM = tM,tm
for x in range(xmin, 0):
self.bounds.append(Bound(x, tM,f))
for x in range(0, xmax+1):
self.bounds.append(Bound(x, tm,f))
self.cur = -1
def xLimits(self, angle_min, angle_max):
cm = cos(angle_min)
cM = cos(angle_max)
if cM < cm:
cm,cM = cM,cm
if cm*cM > 0:
if cM < 0:
cM = 0
else:
cm = 0
return Bound.clamp(round(Bound.radius*cm)), Bound.clamp(round(Bound.radius*cM)), cm*cM >= 0
def nextPoint(self, x, y):
if self.cur == -1:
self.cur = 0
x = self.bounds[0].x
y = self.bounds[0].low
elif y < self.bounds[self.cur].up:
y += 1
else:
self.cur += 1
if self.cur == len(self.bounds):
return False, 0, 0, 0
x = self.bounds[self.cur].x
y = self.bounds[self.cur].low
return True, x, y, int(round(sqrt(x*x+y*y)/self.dr))
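# Minimal usage sketch (hypothetical image buffer): configure the sector geometry once,
# re-init it for every beam, then walk its pixels with nextPoint until it is exhausted.
#
#   sector = Sector()
#   sector.configure(samples=1200, radius=400)           # image radius in pixels
#   sector.init(sonar.currentAngle(), sonar.angleStep())
#   more, x, y, idx = sector.nextPoint(0, 0)
#   while more:
#       image[400 + y, 400 + x] = data[idx]              # data comes from SonarInterface.read()
#       more, x, y, idx = sector.nextPoint(x, y)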
|
"""
MD Analysis
===========
#. :class:`.MDAnalysis`
Class for converting a molecule to and back from an MDAnalysis object.
"""
import logging
from ...utilities import WrapperNotInstalledException
try:
import MDAnalysis as mda
except ModuleNotFoundError:
mda = None
logger = logging.getLogger(__name__)
class MDAnalysis:
"""
Converter for :class:`stk.Molecule` to and from MDAnalysis.
Examples
--------
An stk molecule can be converted into an MDAnalysis Universe.
.. code-block:: python
        import numpy as np
        import stk
        import stko
stkmol = stk.BuildingBlock('NCCNCCN').with_centroid(
position=np.array((10, 10, 10))
)
universe = stko.MDAnalysis().get_universe(stkmol)
print('R_g:', universe.atoms.radius_of_gyration())
print('B_sphere:', universe.atoms.bsphere())
print('Universe COM:', universe.atoms.center_of_mass())
print('stk centroid:', stkmol.get_centroid())
"""
def __init__(self):
if mda is None:
raise WrapperNotInstalledException(
'MDAnalysis is not installed; see README for '
'installation.'
)
def get_universe(self, mol):
"""
Get an MDAnalysis object.
Parameters
----------
mol : :class:`stk.Molecule`
Molecule to convert.
Returns
-------
:class:`MDAnalysis.Universe`
The MDAnalysis Universe of the molecule.
"""
rdkit_mol = mol.to_rdkit_mol()
return mda.Universe(rdkit_mol)
|
def fact(n):
    # recursive factorial; treat n <= 1 as the base case so fact(0) also terminates
    if n <= 1:
        return 1
    else:
        return n*fact(n-1)
print(fact(5))
|
import torch
from torch.optim import Optimizer
class OptimWrapper(Optimizer):
# Mixin class that defines convenient functions for writing Optimizer Wrappers
def __init__(self, optim):
self.optim = optim
def __getstate__(self):
return self.optim.__getstate__()
def __setstate__(self, state):
self.optim.__setstate__(state)
@property
def state(self):
return self.optim.state
@property
def param_groups(self):
return self.optim.param_groups
@param_groups.setter
def param_groups(self, value):
self.optim.param_groups = value
def state_dict(self):
return self.optim.state_dict()
def load_state_dict(self, state_dict):
self.optim.load_state_dict(state_dict)
def zero_grad(self):
self.optim.zero_grad()
def add_param_group(self, param_group):
self.optim.add_param_group(param_group)
@property
def defaults(self):
return self.optim.defaults
@defaults.setter
def defaults(self, defaults):
self.optim.defaults = defaults
@torch.no_grad()
def step(self, closure=None):
self.optim.step(closure=closure)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.optim)
|
import os
from pytest import fixture
@fixture(scope='function')
def environ(request):
origin = dict(os.environ)
@request.addfinalizer
def restore_environ():
os.environ.clear()
os.environ.update(origin)
return os.environ
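# Example use in a test module (sketch): any variables set through the fixture are
# rolled back by the finalizer once the test finishes.
#
#   def test_sets_temporary_variable(environ):
#       environ['MY_FLAG'] = '1'
#       assert os.environ['MY_FLAG'] == '1'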
|
#!/usr/bin/python3
'''
(C) Copyright 2018-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
'''
from apricot import TestWithServers
from test_utils_pool import TestPool
class DestroyRebuild(TestWithServers):
"""Test class for pool destroy tests.
Test Class Description:
This test verifies destruction of a pool that is rebuilding.
:avocado: recursive
"""
    # also remove the commented line from the yaml file for rank 0
CANCEL_FOR_TICKET = [["DAOS-4891", "rank_to_kill", "[0]"]]
def test_destroy_while_rebuilding(self):
"""Jira ID: DAOS-xxxx.
Test Description:
Create a pool across multiple servers. After excluding one of the
servers, verify that the pool can be destroyed during rebuild.
Use Cases:
Verifying that a pool can be destroyed during rebuild.
:avocado: tags=all,daily_regression,medium
:avocado: tags=pool,destroypoolrebuild
"""
# Get the test parameters
self.pool = TestPool(self.context, self.get_dmg_command())
self.pool.get_params(self)
targets = self.params.get("targets", "/run/server_config/servers/*")
ranks = self.params.get("rank_to_kill", "/run/testparams/*")
# Create a pool
self.pool.create()
# Verify the pool information before starting rebuild
checks = {
"pi_nnodes": len(self.hostlist_servers),
"pi_ntargets": len(self.hostlist_servers) * targets,
"pi_ndisabled": 0,
}
self.assertTrue(
self.pool.check_pool_info(**checks),
"Invalid pool information detected prior to rebuild")
# Start rebuild
self.server_managers[0].stop_ranks(ranks, self.d_log, force=True)
self.pool.wait_for_rebuild(True)
# Destroy the pool while rebuild is active
self.pool.destroy()
self.log.info("Test Passed")
self.get_dmg_command().system_start(",".join(ranks))
self.server_managers[0].update_expected_states(",".join(ranks), ["joined"])
|
import os
import re
import socket
import ttfw_idf
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_Protocols')
def test_examples_protocol_asio_udp_server(env, extra_data):
"""
steps: |
1. join AP
2. Start server
3. Test connects to server and sends a test message
4. Test evaluates received test message from server
5. Test evaluates received test message on server stdout
"""
test_msg = b'echo message from client to server'
dut1 = env.get_dut('udp_echo_server', 'examples/protocols/asio/udp_echo_server', dut_class=ttfw_idf.ESP32DUT)
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, 'asio_udp_echo_server.bin')
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('asio_udp_echo_server_bin_size', '{}KB'.format(bin_size // 1024))
# 1. start test
dut1.start_app()
# 2. get the server IP address
data = dut1.expect(re.compile(r' IPv4 address: ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)'), timeout=30)
    # 3. create udp client and connect to server
cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
cli.settimeout(30)
cli.connect((data[0], 2222))
cli.send(test_msg)
data = cli.recv(1024)
# 4. check the message received back from the server
    if data == test_msg:
        print('PASS: Received correct message')
else:
print('Failure!')
raise ValueError('Wrong data received from asio udp server: {} (expected:{})'.format(data, test_msg))
# 5. check the client message appears also on server terminal
dut1.expect(test_msg.decode())
if __name__ == '__main__':
test_examples_protocol_asio_udp_server()
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import logging
import os
import shutil
import tempfile
import time
import traceback
from collections import OrderedDict
from urllib.parse import quote_plus
import ctk
import qt
import SampleData
import SimpleITK as sitk
import sitkUtils
import slicer
import vtk
import vtkSegmentationCore
from MONAILabelLib import GenericAnatomyColors, MONAILabelClient
from slicer.ScriptedLoadableModule import *
from slicer.util import VTKObservationMixin
class MONAILabel(ScriptedLoadableModule):
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "MONAILabel"
self.parent.categories = ["Active Learning"]
self.parent.dependencies = []
self.parent.contributors = ["NVIDIA, KCL"]
self.parent.helpText = """
Active Learning solution.
See more information in <a href="https://github.com/Project-MONAI/MONAILabel">module documentation</a>.
"""
self.parent.acknowledgementText = """
Developed by NVIDIA, KCL
"""
# Additional initialization step after application startup is complete
slicer.app.connect("startupCompleted()", self.initializeAfterStartup)
def initializeAfterStartup(self):
if not slicer.app.commandOptions().noMainWindow:
self.settingsPanel = MONAILabelSettingsPanel()
slicer.app.settingsDialog().addPanel("MONAI Label", self.settingsPanel)
class _ui_MONAILabelSettingsPanel(object):
def __init__(self, parent):
vBoxLayout = qt.QVBoxLayout(parent)
# settings
groupBox = ctk.ctkCollapsibleGroupBox()
groupBox.title = "MONAI Label Server"
groupLayout = qt.QFormLayout(groupBox)
serverUrl = qt.QLineEdit()
groupLayout.addRow("Server address:", serverUrl)
parent.registerProperty("MONAILabel/serverUrl", serverUrl, "text", str(qt.SIGNAL("textChanged(QString)")))
serverUrlHistory = qt.QLineEdit()
groupLayout.addRow("Server address history:", serverUrlHistory)
parent.registerProperty(
"MONAILabel/serverUrlHistory", serverUrlHistory, "text", str(qt.SIGNAL("textChanged(QString)"))
)
fileExtension = qt.QLineEdit()
fileExtension.setText(".nii.gz")
fileExtension.toolTip = "Default extension for uploading images/labels"
groupLayout.addRow("File Extension:", fileExtension)
parent.registerProperty(
"MONAILabel/fileExtension", fileExtension, "text", str(qt.SIGNAL("textChanged(QString)"))
)
clientId = qt.QLineEdit()
clientId.setText("user-xyz")
clientId.toolTip = "Client/User ID that will be sent to MONAI Label server for reference"
groupLayout.addRow("Client/User-ID:", clientId)
parent.registerProperty("MONAILabel/clientId", clientId, "text", str(qt.SIGNAL("textChanged(QString)")))
autoRunSegmentationCheckBox = qt.QCheckBox()
autoRunSegmentationCheckBox.checked = False
autoRunSegmentationCheckBox.toolTip = (
"Enable this option to auto run segmentation if pre-trained model exists when Next Sample is fetched"
)
groupLayout.addRow("Auto-Run Pre-Trained Model:", autoRunSegmentationCheckBox)
parent.registerProperty(
"MONAILabel/autoRunSegmentationOnNextSample",
ctk.ctkBooleanMapper(autoRunSegmentationCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
autoFetchNextSampleCheckBox = qt.QCheckBox()
autoFetchNextSampleCheckBox.checked = False
autoFetchNextSampleCheckBox.toolTip = "Enable this option to fetch Next Sample after saving the label"
groupLayout.addRow("Auto-Fetch Next Sample:", autoFetchNextSampleCheckBox)
parent.registerProperty(
"MONAILabel/autoFetchNextSample",
ctk.ctkBooleanMapper(autoFetchNextSampleCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
autoUpdateModelCheckBox = qt.QCheckBox()
autoUpdateModelCheckBox.checked = True
autoUpdateModelCheckBox.toolTip = "Enable this option to auto update model after submitting the label"
groupLayout.addRow("Auto-Update Model:", autoUpdateModelCheckBox)
parent.registerProperty(
"MONAILabel/autoUpdateModel",
ctk.ctkBooleanMapper(autoUpdateModelCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
askForUserNameCheckBox = qt.QCheckBox()
askForUserNameCheckBox.checked = False
askForUserNameCheckBox.toolTip = "Enable this option to ask for the user name every time the MONAILabel extension is loaded for the first time"
groupLayout.addRow("Ask For User Name:", askForUserNameCheckBox)
parent.registerProperty(
"MONAILabel/askForUserName",
ctk.ctkBooleanMapper(askForUserNameCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
allowOverlapCheckBox = qt.QCheckBox()
allowOverlapCheckBox.checked = False
allowOverlapCheckBox.toolTip = "Enable this option to allow overlapping segmentations"
groupLayout.addRow("Allow Overlapping Segmentations:", allowOverlapCheckBox)
parent.registerProperty(
"MONAILabel/allowOverlappingSegments",
ctk.ctkBooleanMapper(allowOverlapCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
allowOverlapCheckBox.connect("toggled(bool)", self.onUpdateAllowOverlap)
developerModeCheckBox = qt.QCheckBox()
developerModeCheckBox.checked = False
developerModeCheckBox.toolTip = "Enable this option to find options tab etc..."
groupLayout.addRow("Developer Mode:", developerModeCheckBox)
parent.registerProperty(
"MONAILabel/developerMode",
ctk.ctkBooleanMapper(developerModeCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))),
"valueAsInt",
str(qt.SIGNAL("valueAsIntChanged(int)")),
)
vBoxLayout.addWidget(groupBox)
vBoxLayout.addStretch(1)
def onUpdateAllowOverlap(self):
if slicer.util.settingsValue("MONAILabel/allowOverlappingSegments", True, converter=slicer.util.toBool):
if slicer.util.settingsValue("MONAILabel/fileExtension", None) != ".seg.nrrd":
slicer.util.warningDisplay(
"Overlapping segmentations are only availabel with the '.seg.nrrd' file extension! Consider changing MONAILabel file extension."
)
class MONAILabelSettingsPanel(ctk.ctkSettingsPanel):
def __init__(self, *args, **kwargs):
ctk.ctkSettingsPanel.__init__(self, *args, **kwargs)
self.ui = _ui_MONAILabelSettingsPanel(self)
class MONAILabelWidget(ScriptedLoadableModuleWidget, VTKObservationMixin):
def __init__(self, parent=None):
"""
Called when the user opens the module the first time and the widget is initialized.
"""
ScriptedLoadableModuleWidget.__init__(self, parent)
VTKObservationMixin.__init__(self) # needed for parameter node observation
self.logic = None
self._parameterNode = None
self._volumeNode = None
self._segmentNode = None
self._volumeNodes = []
self._updatingGUIFromParameterNode = False
self._scribblesEditorWidget = None
self.info = {}
self.models = OrderedDict()
self.trainers = OrderedDict()
self.config = OrderedDict()
self.current_sample = None
self.samples = {}
self.state = {
"SegmentationModel": "",
"DeepgrowModel": "",
"ScribblesMethod": "",
"CurrentStrategy": "",
"CurrentTrainer": "",
}
self.file_ext = ".nii.gz"
self.dgPositiveFiducialNode = None
self.dgPositiveFiducialNodeObservers = []
self.dgNegativeFiducialNode = None
self.dgNegativeFiducialNodeObservers = []
self.ignoreFiducialNodeAddEvent = False
self.progressBar = None
self.tmpdir = None
self.timer = None
self.scribblesMode = None
self.multi_label = False
def setup(self):
"""
Called when the user opens the module the first time and the widget is initialized.
"""
ScriptedLoadableModuleWidget.setup(self)
# Load widget from .ui file (created by Qt Designer).
# Additional widgets can be instantiated manually and added to self.layout.
uiWidget = slicer.util.loadUI(self.resourcePath("UI/MONAILabel.ui"))
self.layout.addWidget(uiWidget)
self.ui = slicer.util.childWidgetVariables(uiWidget)
        # Set scene in MRML widgets. Make sure that in Qt designer the top-level qMRMLWidget's
        # "mrmlSceneChanged(vtkMRMLScene*)" signal is connected to each MRML widget's
        # "setMRMLScene(vtkMRMLScene*)" slot.
uiWidget.setMRMLScene(slicer.mrmlScene)
# These connections ensure that we update parameter node when scene is closed
self.addObserver(slicer.mrmlScene, slicer.mrmlScene.StartCloseEvent, self.onSceneStartClose)
self.addObserver(slicer.mrmlScene, slicer.mrmlScene.EndCloseEvent, self.onSceneEndClose)
self.addObserver(slicer.mrmlScene, slicer.mrmlScene.NodeAddedEvent, self.onSceneEndImport)
# Create logic class. Logic implements all computations that should be possible to run
# in batch mode, without a graphical user interface.
self.tmpdir = slicer.util.tempDirectory("slicer-monai-label")
self.logic = MONAILabelLogic(self.tmpdir)
# Set icons and tune widget properties
self.ui.serverComboBox.lineEdit().setPlaceholderText("enter server address or leave empty to use default")
self.ui.fetchServerInfoButton.setIcon(self.icon("refresh-icon.png"))
self.ui.segmentationButton.setIcon(self.icon("segment.png"))
self.ui.nextSampleButton.setIcon(self.icon("segment.png"))
self.ui.saveLabelButton.setIcon(self.icon("save.png"))
self.ui.trainingButton.setIcon(self.icon("training.png"))
self.ui.stopTrainingButton.setIcon(self.icon("stop.png"))
self.ui.uploadImageButton.setIcon(self.icon("upload.svg"))
self.ui.importLabelButton.setIcon(self.icon("download.png"))
self.ui.dgPositiveFiducialPlacementWidget.setMRMLScene(slicer.mrmlScene)
self.ui.dgPositiveFiducialPlacementWidget.placeButton().toolTip = "Select +ve points"
self.ui.dgPositiveFiducialPlacementWidget.buttonsVisible = False
self.ui.dgPositiveFiducialPlacementWidget.placeButton().show()
self.ui.dgPositiveFiducialPlacementWidget.deleteButton().show()
self.ui.dgNegativeFiducialPlacementWidget.setMRMLScene(slicer.mrmlScene)
self.ui.dgNegativeFiducialPlacementWidget.placeButton().toolTip = "Select -ve points"
self.ui.dgNegativeFiducialPlacementWidget.buttonsVisible = False
self.ui.dgNegativeFiducialPlacementWidget.placeButton().show()
self.ui.dgNegativeFiducialPlacementWidget.deleteButton().show()
self.ui.dgUpdateButton.setIcon(self.icon("segment.png"))
# Connections
self.ui.fetchServerInfoButton.connect("clicked(bool)", self.onClickFetchInfo)
self.ui.serverComboBox.connect("currentIndexChanged(int)", self.onClickFetchInfo)
self.ui.segmentationModelSelector.connect("currentIndexChanged(int)", self.updateParameterNodeFromGUI)
self.ui.segmentationButton.connect("clicked(bool)", self.onClickSegmentation)
self.ui.deepgrowModelSelector.connect("currentIndexChanged(int)", self.updateParameterNodeFromGUI)
self.ui.nextSampleButton.connect("clicked(bool)", self.onNextSampleButton)
self.ui.trainingButton.connect("clicked(bool)", self.onTraining)
self.ui.stopTrainingButton.connect("clicked(bool)", self.onStopTraining)
self.ui.saveLabelButton.connect("clicked(bool)", self.onSaveLabel)
self.ui.uploadImageButton.connect("clicked(bool)", self.onUploadImage)
self.ui.importLabelButton.connect("clicked(bool)", self.onImportLabel)
self.ui.labelComboBox.connect("currentIndexChanged(int)", self.onSelectLabel)
self.ui.dgUpdateButton.connect("clicked(bool)", self.onUpdateDeepgrow)
self.ui.dgUpdateCheckBox.setStyleSheet("padding-left: 10px;")
# Scribbles
# brush and eraser icon from: https://tablericons.com/
self.ui.scribblesMethodSelector.connect("currentIndexChanged(int)", self.updateParameterNodeFromGUI)
self.ui.paintScribblesButton.setIcon(self.icon("paint.png"))
self.ui.paintScribblesButton.setToolTip("Paint scribbles for selected scribble layer")
self.ui.eraseScribblesButton.setIcon(self.icon("eraser.png"))
self.ui.eraseScribblesButton.setToolTip("Erase scribbles for selected scribble layer")
self.ui.updateScribblesButton.setIcon(self.icon("segment.png"))
self.ui.updateScribblesButton.setToolTip(
"Update label by sending scribbles to server to apply selected post processing method"
)
self.ui.brushSizeSlider.connect("valueChanged(double)", self.updateBrushSize)
self.ui.brushSizeSlider.setToolTip("Change brush size for scribbles tool")
self.ui.brush3dCheckbox.stateChanged.connect(self.on3dBrushCheckbox)
self.ui.brush3dCheckbox.setToolTip("Use 3D brush to paint/erase in multiple slices in 3D")
self.ui.updateScribblesButton.clicked.connect(self.onUpdateScribbles)
self.ui.paintScribblesButton.clicked.connect(self.onPaintScribbles)
self.ui.eraseScribblesButton.clicked.connect(self.onEraseScribbles)
self.ui.scribblesLabelSelector.connect("currentIndexChanged(int)", self.onSelectScribblesLabel)
# creating editable combo box
self.ui.scribblesLabelSelector.addItem(self.icon("fg_green.png"), "Foreground")
self.ui.scribblesLabelSelector.addItem(self.icon("bg_red.png"), "Background")
self.ui.scribblesLabelSelector.setCurrentIndex(0)
# start with scribbles section disabled
self.ui.scribblesCollapsibleButton.setEnabled(False)
self.ui.scribblesCollapsibleButton.collapsed = True
# embedded segment editor
self.ui.embeddedSegmentEditorWidget.setMRMLScene(slicer.mrmlScene)
self.ui.embeddedSegmentEditorWidget.setSegmentationNodeSelectorVisible(False)
self.ui.embeddedSegmentEditorWidget.setMasterVolumeNodeSelectorVisible(False)
self.initializeParameterNode()
self.updateServerUrlGUIFromSettings()
# self.onClickFetchInfo()
if slicer.util.settingsValue("MONAILabel/askForUserName", False, converter=slicer.util.toBool):
text = qt.QInputDialog().getText(
self.parent,
"User Name",
"Please enter your name:",
qt.QLineEdit.Normal,
slicer.util.settingsValue("MONAILabel/clientId", None),
)
if text:
settings = qt.QSettings()
settings.setValue("MONAILabel/clientId", text)
def cleanup(self):
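        """Called when the application closes and the module widget is destroyed."""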
self.removeObservers()
shutil.rmtree(self.tmpdir, ignore_errors=True)
def enter(self):
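        """Called each time the user opens this module."""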
self.initializeParameterNode()
if self._segmentNode:
self.updateGUIFromParameterNode()
def exit(self):
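        """Called each time the user opens a different module."""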
self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
def onSceneStartClose(self, caller, event):
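        """Called just before the scene is closed; cache the current GUI selections and reset per-sample state."""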
self.state = {
"SegmentationModel": self.ui.segmentationModelSelector.currentText,
"DeepgrowModel": self.ui.deepgrowModelSelector.currentText,
"ScribblesMethod": self.ui.scribblesMethodSelector.currentText,
"CurrentStrategy": self.ui.strategyBox.currentText,
"CurrentTrainer": self.ui.trainerBox.currentText,
}
self._volumeNode = None
self._segmentNode = None
self._volumeNodes.clear()
self.setParameterNode(None)
self.current_sample = None
self.samples.clear()
self.resetFiducial(
self.ui.dgPositiveFiducialPlacementWidget, self.dgPositiveFiducialNode, self.dgPositiveFiducialNodeObservers
)
self.dgPositiveFiducialNode = None
self.resetFiducial(
self.ui.dgNegativeFiducialPlacementWidget, self.dgNegativeFiducialNode, self.dgNegativeFiducialNodeObservers
)
self.dgNegativeFiducialNode = None
self.onClearScribbles()
def resetFiducial(self, fiducialWidget, fiducialNode, fiducialNodeObservers):
if fiducialWidget.placeModeEnabled:
fiducialWidget.setPlaceModeEnabled(False)
if fiducialNode:
slicer.mrmlScene.RemoveNode(fiducialNode)
self.removeFiducialNodeObservers(fiducialNode, fiducialNodeObservers)
def onSceneEndClose(self, caller, event):
if self.parent.isEntered:
self.initializeParameterNode()
def onSceneEndImport(self, caller, event):
if not self._volumeNode:
self.updateGUIFromParameterNode()
def initializeParameterNode(self):
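        """Ensure the parameter node exists and is observed."""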
self.setParameterNode(self.logic.getParameterNode())
# Select default input nodes if nothing is selected yet to save a few clicks for the user
if not self._parameterNode.GetNodeReference("InputVolume"):
firstVolumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode")
if firstVolumeNode:
self._parameterNode.SetNodeReferenceID("InputVolume", firstVolumeNode.GetID())
def setParameterNode(self, inputParameterNode):
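        """Set and observe the parameter node so that the GUI is updated whenever it changes."""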
if inputParameterNode:
self.logic.setDefaultParameters(inputParameterNode)
if self._parameterNode is not None:
self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
self._parameterNode = inputParameterNode
if self._parameterNode is not None:
self.addObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode)
# Initial GUI update
self.updateGUIFromParameterNode()
def monitorTraining(self):
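        # Poll the training status: update the progress/accuracy bars while training is RUNNING,
        # otherwise stop the timer and re-enable the training buttons.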
status = self.isTrainingRunning(check_only=False)
if status and status.get("status") == "RUNNING":
info = self.logic.info()
train_stats = info.get("train_stats")
if not train_stats:
return
train_stats = next(iter(train_stats.values())) if train_stats else train_stats
current = 0 if train_stats.get("total_time") else train_stats.get("epoch", 1)
total = train_stats.get("total_epochs", 1)
percent = max(1, 100 * current / total)
if self.ui.trainingProgressBar.value != percent:
self.ui.trainingProgressBar.setValue(percent)
self.ui.trainingProgressBar.setToolTip(f"{current}/{total} epoch is completed")
dice = train_stats.get("best_metric", 0)
self.updateAccuracyBar(dice)
return
print("Training completed")
self.ui.trainingProgressBar.setValue(100)
self.timer.stop()
self.timer = None
self.ui.trainingProgressBar.setToolTip(f"Training: {status.get('status', 'DONE')}")
self.ui.trainingButton.setEnabled(True)
self.ui.stopTrainingButton.setEnabled(False)
self.fetchInfo()
def updateGUIFromParameterNode(self, caller=None, event=None):
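        """Called whenever the parameter node changes; updates the module GUI to reflect its current state."""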
if self._parameterNode is None or self._updatingGUIFromParameterNode:
return
# Make sure GUI changes do not call updateParameterNodeFromGUI (it could cause infinite loop)
self._updatingGUIFromParameterNode = True
file_ext = slicer.util.settingsValue("MONAILabel/fileExtension", self.file_ext)
self.file_ext = file_ext if file_ext else self.file_ext
# Update node selectors and sliders
self.ui.inputSelector.clear()
for v in self._volumeNodes:
self.ui.inputSelector.addItem(v.GetName())
self.ui.inputSelector.setToolTip(self.current_sample.get("name", "") if self.current_sample else "")
if self._volumeNode:
self.ui.inputSelector.setCurrentIndex(self.ui.inputSelector.findText(self._volumeNode.GetName()))
self.ui.inputSelector.setEnabled(False) # Allow only one active scene
self.ui.uploadImageButton.setEnabled(False)
if self.info and slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode") and self._volumeNode is None:
self._volumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode")
self.initSample({"id": self._volumeNode.GetName(), "session": True}, autosegment=False)
self.ui.inputSelector.setEnabled(False)
self.ui.uploadImageButton.setEnabled(self.current_sample and self.current_sample.get("session"))
self.updateSelector(self.ui.segmentationModelSelector, ["segmentation"], "SegmentationModel", 0)
self.updateSelector(self.ui.deepgrowModelSelector, ["deepgrow", "deepedit"], "DeepgrowModel", 0)
self.updateSelector(self.ui.scribblesMethodSelector, ["scribbles"], "ScribblesMethod", 0)
if self.models and [k for k, v in self.models.items() if v["type"] == "segmentation"]:
self.ui.segmentationCollapsibleButton.collapsed = False
if self.models and [k for k, v in self.models.items() if v["type"] in ("deepgrow", "deepedit")]:
self.ui.deepgrowCollapsibleButton.collapsed = False
if self.models and [k for k, v in self.models.items() if v["type"] == "scribbles"]:
self.ui.scribblesCollapsibleButton.collapsed = False
self.ui.labelComboBox.clear()
if self._segmentNode:
segmentation = self._segmentNode.GetSegmentation()
totalSegments = segmentation.GetNumberOfSegments()
segmentIds = [segmentation.GetNthSegmentID(i) for i in range(totalSegments)]
for idx, segmentId in enumerate(segmentIds):
segment = segmentation.GetSegment(segmentId)
label = segment.GetName()
if label in ["foreground_scribbles", "background_scribbles"]:
continue
self.ui.labelComboBox.addItem(label)
else:
for label in self.info.get("labels", {}):
self.ui.labelComboBox.addItem(label)
currentLabel = self._parameterNode.GetParameter("CurrentLabel")
idx = self.ui.labelComboBox.findText(currentLabel) if currentLabel else 0
idx = 0 if idx < 0 < self.ui.labelComboBox.count else idx
self.ui.labelComboBox.setCurrentIndex(idx)
self.ui.appComboBox.clear()
self.ui.appComboBox.addItem(self.info.get("name", ""))
datastore_stats = self.info.get("datastore", {})
current = datastore_stats.get("completed", 0)
total = datastore_stats.get("total", 0)
self.ui.activeLearningProgressBar.setValue(current / max(total, 1) * 100)
self.ui.activeLearningProgressBar.setToolTip(f"{current}/{total} samples are labeled")
train_stats = self.info.get("train_stats", {})
train_stats = next(iter(train_stats.values())) if train_stats else train_stats
dice = train_stats.get("best_metric", 0)
self.updateAccuracyBar(dice)
self.ui.strategyBox.clear()
for strategy in self.info.get("strategies", {}):
self.ui.strategyBox.addItem(strategy)
currentStrategy = self._parameterNode.GetParameter("CurrentStrategy")
currentStrategy = currentStrategy if currentStrategy else self.state["CurrentStrategy"]
self.ui.strategyBox.setCurrentIndex(self.ui.strategyBox.findText(currentStrategy) if currentStrategy else 0)
self.ui.trainerBox.clear()
trainers = self.info.get("trainers", {})
if trainers:
self.ui.trainerBox.addItem("ALL")
for t in trainers:
self.ui.trainerBox.addItem(t)
currentTrainer = self._parameterNode.GetParameter("CurrentTrainer")
currentTrainer = currentTrainer if currentTrainer else self.state["CurrentTrainer"]
self.ui.trainerBox.setCurrentIndex(self.ui.trainerBox.findText(currentTrainer) if currentTrainer else 0)
developer_mode = slicer.util.settingsValue("MONAILabel/developerMode", True, converter=slicer.util.toBool)
self.ui.optionsCollapsibleButton.setVisible(developer_mode)
# Enable/Disable
self.ui.nextSampleButton.setEnabled(self.ui.strategyBox.count)
        is_training_running = bool(self.info and self.isTrainingRunning())
self.ui.trainingButton.setEnabled(self.info and not is_training_running and current)
self.ui.stopTrainingButton.setEnabled(is_training_running)
if is_training_running and self.timer is None:
self.timer = qt.QTimer()
self.timer.setInterval(5000)
self.timer.connect("timeout()", self.monitorTraining)
self.timer.start()
self.ui.segmentationButton.setEnabled(
self.ui.segmentationModelSelector.currentText and self._volumeNode is not None
)
self.ui.saveLabelButton.setEnabled(self._segmentNode is not None)
self.ui.importLabelButton.setEnabled(self._segmentNode is not None)
# Create empty markup fiducial node for deep grow +ve and -ve
if self._segmentNode:
if not self.dgPositiveFiducialNode:
self.dgPositiveFiducialNode, self.dgPositiveFiducialNodeObservers = self.createFiducialNode(
"P", self.onDeepGrowFiducialNodeModified, [0.5, 1, 0.5]
)
self.ui.dgPositiveFiducialPlacementWidget.setCurrentNode(self.dgPositiveFiducialNode)
self.ui.dgPositiveFiducialPlacementWidget.setPlaceModeEnabled(False)
if not self.dgNegativeFiducialNode:
self.dgNegativeFiducialNode, self.dgNegativeFiducialNodeObservers = self.createFiducialNode(
"N", self.onDeepGrowFiducialNodeModified, [0.5, 0.5, 1]
)
self.ui.dgNegativeFiducialPlacementWidget.setCurrentNode(self.dgNegativeFiducialNode)
self.ui.dgNegativeFiducialPlacementWidget.setPlaceModeEnabled(False)
self.ui.scribblesCollapsibleButton.setEnabled(self.ui.scribblesMethodSelector.count)
self.ui.scribblesCollapsibleButton.collapsed = False
self.ui.dgPositiveFiducialPlacementWidget.setEnabled(self.ui.deepgrowModelSelector.currentText)
self.ui.dgNegativeFiducialPlacementWidget.setEnabled(self.ui.deepgrowModelSelector.currentText)
self.multi_label = "background" in self.info.get("labels", [])
if self.multi_label:
self.ui.dgLabelBackground.hide()
self.ui.dgNegativeFiducialPlacementWidget.hide()
self.ui.freezeUpdateCheckBox.show()
self.ui.dgLabelForeground.setText("Landmarks:")
else:
self.ui.dgNegativeFiducialPlacementWidget.show()
self.ui.freezeUpdateCheckBox.hide()
self.ui.dgLabelForeground.setText("Foreground:")
self.ui.dgUpdateCheckBox.setEnabled(self.ui.deepgrowModelSelector.currentText and self._segmentNode)
self.ui.dgUpdateButton.setEnabled(self.ui.deepgrowModelSelector.currentText and self._segmentNode)
self.ui.embeddedSegmentEditorWidget.setMRMLSegmentEditorNode(
slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLSegmentEditorNode")
)
# All the GUI updates are done
self._updatingGUIFromParameterNode = False
def updateParameterNodeFromGUI(self, caller=None, event=None):
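        """Called when the user changes the GUI; saves the current selections into the parameter node."""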
if self._parameterNode is None or self._updatingGUIFromParameterNode:
return
wasModified = self._parameterNode.StartModify() # Modify all properties in a single batch
segmentationModelIndex = self.ui.segmentationModelSelector.currentIndex
if segmentationModelIndex >= 0:
segmentationModel = self.ui.segmentationModelSelector.itemText(segmentationModelIndex)
self._parameterNode.SetParameter("SegmentationModel", segmentationModel)
deepgrowModelIndex = self.ui.deepgrowModelSelector.currentIndex
if deepgrowModelIndex >= 0:
deepgrowModel = self.ui.deepgrowModelSelector.itemText(deepgrowModelIndex)
self._parameterNode.SetParameter("DeepgrowModel", deepgrowModel)
scribblesMethodIndex = self.ui.scribblesMethodSelector.currentIndex
if scribblesMethodIndex >= 0:
scribblesMethod = self.ui.scribblesMethodSelector.itemText(scribblesMethodIndex)
self._parameterNode.SetParameter("ScribblesMethod", scribblesMethod)
currentLabelIndex = self.ui.labelComboBox.currentIndex
if currentLabelIndex >= 0:
currentLabel = self.ui.labelComboBox.itemText(currentLabelIndex)
self._parameterNode.SetParameter("CurrentLabel", currentLabel)
currentStrategyIndex = self.ui.strategyBox.currentIndex
if currentStrategyIndex >= 0:
currentStrategy = self.ui.strategyBox.itemText(currentStrategyIndex)
self._parameterNode.SetParameter("CurrentStrategy", currentStrategy)
currentTrainerIndex = self.ui.trainerBox.currentIndex
if currentTrainerIndex >= 0:
currentTrainer = self.ui.trainerBox.itemText(currentTrainerIndex)
self._parameterNode.SetParameter("CurrentTrainer", currentTrainer)
self._parameterNode.EndModify(wasModified)
def updateSelector(self, selector, model_types, param, defaultIndex=0):
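        # Populate a model selector with models of the given types and restore the previous selection (if any)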
wasSelectorBlocked = selector.blockSignals(True)
selector.clear()
for model_name, model in self.models.items():
if model["type"] in model_types:
selector.addItem(model_name)
selector.setItemData(selector.count - 1, model["description"], qt.Qt.ToolTipRole)
model = self._parameterNode.GetParameter(param)
model = model if model else self.state.get(param, "")
modelIndex = selector.findText(model)
modelIndex = defaultIndex if modelIndex < 0 < selector.count else modelIndex
selector.setCurrentIndex(modelIndex)
try:
modelInfo = self.models[model]
selector.setToolTip(modelInfo["description"])
except:
selector.setToolTip("")
selector.blockSignals(wasSelectorBlocked)
def updateConfigTable(self):
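        # Rebuild the configuration table from the server info, grouping entries into
        # infer/train/activelearning/scoring sections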
table = self.ui.configTable
table.clear()
headers = ["section", "name", "key", "value"]
table.setColumnCount(len(headers))
table.setHorizontalHeaderLabels(headers)
table.setColumnWidth(0, 50)
config = copy.deepcopy(self.info)
infer = config.get("models", {})
train = config.get("trainers", {})
activelearning = config.get("strategies", {})
scoring = config.get("scoring", {})
row_count = 0
config = {"infer": infer, "train": train, "activelearning": activelearning, "scoring": scoring}
for c in config.values():
row_count += sum([len(c[k].get("config", {})) for k in c.keys()])
# print(f"Total rows: {row_count}")
table.setRowCount(row_count)
n = 0
for section in config:
if not config[section]:
continue
c_section = config[section]
l_section = sum([len(c_section[k].get("config", {})) for k in c_section.keys()])
if not l_section:
continue
# print(f"{n} => l_section = {l_section}")
if l_section:
table.setSpan(n, 0, l_section, 1)
for name in c_section:
c_name = c_section[name]
l_name = len(c_name.get("config", {}))
if not l_name:
continue
# print(f"{n} => l_name = {l_name}")
if l_name:
table.setSpan(n, 1, l_name, 1)
for key, val in c_name.get("config", {}).items():
item = qt.QTableWidgetItem(section)
item.setFlags(item.flags() & ~qt.Qt.ItemIsEditable)
table.setItem(n, 0, item)
item = qt.QTableWidgetItem(name)
table.setItem(n, 1, item)
item.setFlags(item.flags() & ~qt.Qt.ItemIsEditable)
item = qt.QTableWidgetItem(key)
table.setItem(n, 2, item)
item.setFlags(item.flags() & ~qt.Qt.ItemIsEditable)
if isinstance(val, dict) or isinstance(val, list):
combo = qt.QComboBox()
for m, v in enumerate(val):
combo.addItem(v)
combo.setCurrentIndex(0)
table.setCellWidget(n, 3, combo)
elif isinstance(val, bool):
checkbox = qt.QCheckBox()
checkbox.setChecked(val)
table.setCellWidget(n, 3, checkbox)
else:
table.setItem(n, 3, qt.QTableWidgetItem(str(val) if val else ""))
# print(f"{n} => {section} => {name} => {key} => {val}")
n = n + 1
def updateAccuracyBar(self, dice):
self.ui.accuracyProgressBar.setValue(dice * 100)
css = ["stop: 0 red"]
if dice > 0.5:
css.append(f"stop: {0.5 / dice} orange")
if dice > 0.6:
css.append(f"stop: {0.6 / dice} yellow")
if dice > 0.7:
css.append(f"stop: {0.7 / dice} lightgreen")
if dice > 0.8:
css.append(f"stop: {0.8 / dice} green")
if dice > 0.9:
css.append(f"stop: {0.9 / dice} darkgreen")
self.ui.accuracyProgressBar.setStyleSheet(
"QProgressBar {text-align: center;} "
"QProgressBar::chunk {background-color: "
"qlineargradient(x0: 0, x2: 1, " + ",".join(css) + ")}"
)
self.ui.accuracyProgressBar.setToolTip(f"Accuracy: {dice:.4f}")
def getParamsFromConfig(self, filter, filter2=None):
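        # Read (possibly user-edited) values back from the config table and cast them
        # to the types reported in the server info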
mapping = {"infer": "models", "train": "trainers", "activelearning": "strategies", "scoring": "scoring"}
config = {}
for row in range(self.ui.configTable.rowCount):
section = str(self.ui.configTable.item(row, 0).text())
name = str(self.ui.configTable.item(row, 1).text())
key = str(self.ui.configTable.item(row, 2).text())
value = self.ui.configTable.item(row, 3)
if value is None:
value = self.ui.configTable.cellWidget(row, 3)
value = value.checked if isinstance(value, qt.QCheckBox) else value.currentText
else:
value = str(value.text())
v = self.info.get(mapping.get(section, ""), {}).get(name, {}).get("config", {}).get(key, {})
if isinstance(v, int):
value = int(value) if value else 0
elif isinstance(v, float):
value = float(value) if value else 0.0
# print(f"{section} => {name} => {key} => {value}")
if config.get(section) is None:
config[section] = {}
if config[section].get(name) is None:
config[section][name] = {}
config[section][name][key] = value
# print(f"row: {row}, section: {section}, name: {name}, value: {value}, type: {type(v)}")
res = config.get(filter, {})
res = res.get(filter2, {}) if filter2 else res
return res
def onDeepGrowFiducialNodeModified(self, observer, eventid):
logging.debug("Deepgrow Point Event!!")
if self.ignoreFiducialNodeAddEvent:
return
markupsNode = observer
movingMarkupIndex = markupsNode.GetDisplayNode().GetActiveControlPoint()
logging.debug("Markup point added; point ID = {}".format(movingMarkupIndex))
current_point = self.getFiducialPointXYZ(markupsNode, movingMarkupIndex)
if not self.ui.dgUpdateCheckBox.checked:
self.onClickDeepgrow(current_point, skip_infer=True)
return
self.onClickDeepgrow(current_point)
self.ignoreFiducialNodeAddEvent = True
self.onEditFiducialPoints(self.dgPositiveFiducialNode, "MONAILabel.ForegroundPoints")
self.onEditFiducialPoints(self.dgNegativeFiducialNode, "MONAILabel.BackgroundPoints")
self.ignoreFiducialNodeAddEvent = False
def getFiducialPointsXYZ(self, fiducialNode, name):
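        # Convert all fiducial points of the node from RAS to IJK coordinates of the current volume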
v = self._volumeNode
RasToIjkMatrix = vtk.vtkMatrix4x4()
v.GetRASToIJKMatrix(RasToIjkMatrix)
point_set = []
n = fiducialNode.GetNumberOfFiducials()
for i in range(n):
coord = [0.0, 0.0, 0.0]
fiducialNode.GetNthFiducialPosition(i, coord)
world = [0, 0, 0, 0]
fiducialNode.GetNthFiducialWorldCoordinates(i, world)
p_Ras = [coord[0], coord[1], coord[2], 1.0]
p_Ijk = RasToIjkMatrix.MultiplyDoublePoint(p_Ras)
p_Ijk = [round(i) for i in p_Ijk]
logging.debug("RAS: {}; WORLD: {}; IJK: {}".format(coord, world, p_Ijk))
point_set.append(p_Ijk[0:3])
logging.info("{} => Current Fiducials-Points: {}".format(name, point_set))
return point_set
def getFiducialPointXYZ(self, fiducialNode, index):
v = self._volumeNode
RasToIjkMatrix = vtk.vtkMatrix4x4()
v.GetRASToIJKMatrix(RasToIjkMatrix)
coord = [0.0, 0.0, 0.0]
fiducialNode.GetNthFiducialPosition(index, coord)
world = [0, 0, 0, 0]
fiducialNode.GetNthFiducialWorldCoordinates(index, world)
p_Ras = [coord[0], coord[1], coord[2], 1.0]
p_Ijk = RasToIjkMatrix.MultiplyDoublePoint(p_Ras)
p_Ijk = [round(i) for i in p_Ijk]
logging.debug("RAS: {}; WORLD: {}; IJK: {}".format(coord, world, p_Ijk))
return p_Ijk[0:3]
def onEditFiducialPoints(self, fiducialNode, tagName):
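        # Re-populate the fiducial node from the points stored as a tag on the currently selected segment
        # (points are stored in IJK and converted back to RAS)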
if fiducialNode is None:
return
fiducialNode.RemoveAllMarkups()
segmentId, segment = self.currentSegment()
if segment and segmentId:
v = self._volumeNode
IjkToRasMatrix = vtk.vtkMatrix4x4()
v.GetIJKToRASMatrix(IjkToRasMatrix)
fPosStr = vtk.mutable("")
segment.GetTag(tagName, fPosStr)
pointset = str(fPosStr)
logging.debug("{} => {} Fiducial points are: {}".format(segmentId, segment.GetName(), pointset))
if fPosStr is not None and len(pointset) > 0:
points = json.loads(pointset)
for p in points:
p_Ijk = [p[0], p[1], p[2], 1.0]
p_Ras = IjkToRasMatrix.MultiplyDoublePoint(p_Ijk)
logging.debug("Add Fiducial: {} => {}".format(p_Ijk, p_Ras))
fiducialNode.AddFiducialFromArray(p_Ras[0:3])
def currentSegment(self):
segmentation = self._segmentNode.GetSegmentation()
segmentId = segmentation.GetSegmentIdBySegmentName(self.ui.labelComboBox.currentText)
segment = segmentation.GetSegment(segmentId)
logging.debug("Current SegmentID: {}; Segment: {}".format(segmentId, segment))
return segmentId, segment
def onSelectLabel(self, caller=None, event=None):
self.updateParameterNodeFromGUI(caller, event)
self.ignoreFiducialNodeAddEvent = True
self.onEditFiducialPoints(self.dgPositiveFiducialNode, "MONAILabel.ForegroundPoints")
self.onEditFiducialPoints(self.dgNegativeFiducialNode, "MONAILabel.BackgroundPoints")
self.ignoreFiducialNodeAddEvent = False
def icon(self, name="MONAILabel.png"):
# It should not be necessary to modify this method
iconPath = os.path.join(os.path.dirname(__file__), "Resources", "Icons", name)
if os.path.exists(iconPath):
return qt.QIcon(iconPath)
return qt.QIcon()
def updateServerSettings(self):
self.logic.setServer(self.serverUrl())
self.logic.setClientId(slicer.util.settingsValue("MONAILabel/clientId", "user-xyz"))
self.saveServerUrl()
def serverUrl(self):
serverUrl = self.ui.serverComboBox.currentText
if not serverUrl:
serverUrl = "http://127.0.0.1:8000"
return serverUrl.rstrip("/")
def saveServerUrl(self):
self.updateParameterNodeFromGUI()
# Save selected server URL
settings = qt.QSettings()
serverUrl = self.ui.serverComboBox.currentText
settings.setValue("MONAILabel/serverUrl", serverUrl)
# Save current server URL to the top of history
serverUrlHistory = settings.value("MONAILabel/serverUrlHistory")
if serverUrlHistory:
serverUrlHistory = serverUrlHistory.split(";")
else:
serverUrlHistory = []
try:
serverUrlHistory.remove(serverUrl)
except ValueError:
pass
serverUrlHistory.insert(0, serverUrl)
serverUrlHistory = serverUrlHistory[:10] # keep up to first 10 elements
settings.setValue("MONAILabel/serverUrlHistory", ";".join(serverUrlHistory))
self.updateServerUrlGUIFromSettings()
def onClickFetchInfo(self):
self.fetchInfo()
self.updateConfigTable()
def fetchInfo(self, showInfo=False):
if not self.logic:
return
start = time.time()
try:
self.updateServerSettings()
info = self.logic.info()
self.info = info
if self.info.get("config"):
slicer.util.errorDisplay(
"Please upgrade the monai server to latest version",
detailedText=traceback.format_exc(),
)
return
except:
slicer.util.errorDisplay(
"Failed to fetch models from remote server. "
"Make sure server address is correct and <server_uri>/info/ "
"is accessible in browser",
detailedText=traceback.format_exc(),
)
return
self.models.clear()
self.config = info.get("config", {})
model_count = {}
models = info.get("models", {})
for k, v in models.items():
model_type = v.get("type", "segmentation")
model_count[model_type] = model_count.get(model_type, 0) + 1
logging.debug("{} = {}".format(k, model_type))
self.models[k] = v
self.updateGUIFromParameterNode()
msg = ""
msg += "-----------------------------------------------------\t\n"
msg += "Total Models Available: \t" + str(len(models)) + "\t\n"
msg += "-----------------------------------------------------\t\n"
for model_type in model_count.keys():
msg += model_type.capitalize() + " Models: \t" + str(model_count[model_type]) + "\t\n"
msg += "-----------------------------------------------------\t\n"
if showInfo:
qt.QMessageBox.information(slicer.util.mainWindow(), "MONAI Label", msg)
logging.info(msg)
logging.info("Time consumed by fetch info: {0:3.1f}".format(time.time() - start))
def setProgressBarLabelText(self, label):
if not self.progressBar:
self.progressBar = slicer.util.createProgressDialog(windowTitle="Wait...", maximum=100)
self.progressBar.labelText = label
def reportProgress(self, progressPercentage):
if not self.progressBar:
self.progressBar = slicer.util.createProgressDialog(windowTitle="Wait...", maximum=100)
self.progressBar.show()
self.progressBar.activateWindow()
self.progressBar.setValue(progressPercentage)
slicer.app.processEvents()
def onTraining(self):
start = time.time()
status = None
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
self.updateServerSettings()
model = self.ui.trainerBox.currentText
model = model if model and model != "ALL" else None
params = self.getParamsFromConfig("train", model)
status = self.logic.train_start(model, params)
self.ui.trainingProgressBar.setValue(1)
self.ui.trainingProgressBar.setToolTip("Training: STARTED")
time.sleep(1)
self.updateGUIFromParameterNode()
except:
slicer.util.errorDisplay(
"Failed to run training in MONAI Label Server", detailedText=traceback.format_exc()
)
finally:
qt.QApplication.restoreOverrideCursor()
if status:
msg = "ID: {}\nStatus: {}\nStart Time: {}\n".format(
status.get("id"),
status.get("status"),
status.get("start_ts"),
)
# slicer.util.infoDisplay(msg, detailedText=json.dumps(status, indent=2))
logging.info(msg)
logging.info("Time consumed by training: {0:3.1f}".format(time.time() - start))
def onStopTraining(self):
start = time.time()
status = None
if not slicer.util.confirmOkCancelDisplay(
"This will kill/stop current Training task. Are you sure to continue?"
):
return
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
self.updateServerSettings()
status = self.logic.train_stop()
except:
slicer.util.errorDisplay("Failed to stop Training Task", detailedText=traceback.format_exc())
finally:
qt.QApplication.restoreOverrideCursor()
if status:
msg = "Status: {}\nStart Time: {}\nEnd Time: {}\nResult: {}".format(
status.get("status"),
status.get("start_ts"),
status.get("end_ts"),
status.get("result", status.get("details", [])[-1]),
)
# slicer.util.infoDisplay(msg, detailedText=json.dumps(status, indent=2))
logging.info(msg)
self.updateGUIFromParameterNode()
logging.info("Time consumed by stop training: {0:3.1f}".format(time.time() - start))
def isTrainingRunning(self, check_only=True):
if not self.logic:
return False
self.updateServerSettings()
return self.logic.train_status(check_only)
def onNextSampleButton(self):
if not self.logic:
return
if self._volumeNode or len(slicer.util.getNodesByClass("vtkMRMLScalarVolumeNode")):
if not slicer.util.confirmOkCancelDisplay(
"This will close current scene. Please make sure you have saved your current work.\n"
"Are you sure to continue?"
):
return
self.onClearScribbles()
slicer.mrmlScene.Clear(0)
start = time.time()
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
self.updateServerSettings()
strategy = self.ui.strategyBox.currentText
if not strategy:
slicer.util.errorDisplay("No Strategy Found/Selected\t")
return
sample = self.logic.next_sample(strategy, self.getParamsFromConfig("activelearning", strategy))
logging.debug(sample)
if not sample.get("id"):
slicer.util.warningDisplay(
"Unlabled Samples/Images Not Found at server. Instead you can load your own image."
)
return
if self.samples.get(sample["id"]) is not None:
self.current_sample = self.samples[sample["id"]]
name = self.current_sample["VolumeNodeName"]
index = self.ui.inputSelector.findText(name)
self.ui.inputSelector.setCurrentIndex(index)
return
logging.info(sample)
image_id = sample["id"]
image_file = sample.get("path")
image_name = sample.get("name", image_id)
node_name = sample.get("PatientID", sample.get("name", image_id))[-20:]
checksum = sample.get("checksum")
local_exists = image_file and os.path.exists(image_file)
logging.info(f"Check if file exists/shared locally: {image_file} => {local_exists}")
if local_exists:
self._volumeNode = slicer.util.loadVolume(image_file)
self._volumeNode.SetName(node_name)
else:
download_uri = f"{self.serverUrl()}/datastore/image?image={quote_plus(image_id)}"
logging.info(download_uri)
sampleDataLogic = SampleData.SampleDataLogic()
self._volumeNode = sampleDataLogic.downloadFromURL(
nodeNames=node_name, fileNames=image_name, uris=download_uri, checksums=checksum
)[0]
self.initSample(sample)
except:
slicer.util.errorDisplay(
"Failed to fetch Sample from MONAI Label Server", detailedText=traceback.format_exc()
)
finally:
qt.QApplication.restoreOverrideCursor()
self.updateGUIFromParameterNode()
logging.info("Time consumed by next_sample: {0:3.1f}".format(time.time() - start))
def initSample(self, sample, autosegment=True):
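        # Register the loaded volume as the current sample, create an empty segmentation for it and
        # optionally trigger auto-segmentation if a matching model is available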
sample["VolumeNodeName"] = self._volumeNode.GetName()
self.current_sample = sample
self.samples[sample["id"]] = sample
self._volumeNodes.append(self._volumeNode)
# Create Empty Segments for all labels for this node
self.createSegmentNode()
segmentEditorWidget = slicer.modules.segmenteditor.widgetRepresentation().self().editor
segmentEditorWidget.setSegmentationNode(self._segmentNode)
segmentEditorWidget.setMasterVolumeNode(self._volumeNode)
# check if user allows overlapping segments
if slicer.util.settingsValue("MONAILabel/allowOverlappingSegments", False, converter=slicer.util.toBool):
# set segment editor to allow overlaps
slicer.util.getNodesByClass("vtkMRMLSegmentEditorNode")[0].SetOverwriteMode(2)
if self.info.get("labels"):
self.updateSegmentationMask(None, self.info.get("labels"))
# Check if user wants to run auto-segmentation on new sample
if autosegment and slicer.util.settingsValue(
"MONAILabel/autoRunSegmentationOnNextSample", True, converter=slicer.util.toBool
):
for label in self.info.get("labels", []):
for name, model in self.models.items():
if label in model.get("labels", []):
qt.QApplication.restoreOverrideCursor()
self.ui.segmentationModelSelector.currentText = name
self.onClickSegmentation()
return
def getPermissionForImageDataUpload(self):
return slicer.util.confirmOkCancelDisplay(
"Master volume - without any additional patient information -"
" will be sent to remote data processing server: {0}.\n\n"
"Click 'OK' to proceed with the segmentation.\n"
"Click 'Cancel' to not upload any data and cancel segmentation.\n".format(self.serverUrl()),
dontShowAgainSettingsKey="MONAILabel/showImageDataSendWarning",
)
def onUploadImage(self, init_sample=True, session=False):
volumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode")
image_id = volumeNode.GetName()
if not self.getPermissionForImageDataUpload():
return False
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
in_file = tempfile.NamedTemporaryFile(suffix=self.file_ext, dir=self.tmpdir).name
self.reportProgress(5)
start = time.time()
slicer.util.saveNode(volumeNode, in_file)
logging.info("Saved Input Node into {0} in {1:3.1f}s".format(in_file, time.time() - start))
self.reportProgress(30)
if session:
self.current_sample["session_id"] = self.logic.create_session(in_file)["session_id"]
else:
self.logic.upload_image(in_file, image_id)
self.current_sample["session"] = False
self.reportProgress(100)
self._volumeNode = volumeNode
if init_sample:
self.initSample({"id": image_id}, autosegment=False)
qt.QApplication.restoreOverrideCursor()
self.updateGUIFromParameterNode()
return True
except:
self.reportProgress(100)
qt.QApplication.restoreOverrideCursor()
if session:
slicer.util.errorDisplay(
"Server Error:: Session creation Failed\nPlease upgrade to latest monailable version (> 0.2.0)",
detailedText=traceback.format_exc(),
)
else:
slicer.util.errorDisplay("Failed to upload volume to Server", detailedText=traceback.format_exc())
return False
def onImportLabel(self):
if not self.ui.labelPathLineEdit.currentPath or not os.path.exists(self.ui.labelPathLineEdit.currentPath):
slicer.util.warningDisplay("Label File not selected")
return
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
self.updateSegmentationMask(self.ui.labelPathLineEdit.currentPath, self.info["labels"])
qt.QApplication.restoreOverrideCursor()
except:
qt.QApplication.restoreOverrideCursor()
slicer.util.errorDisplay("Failed to import label", detailedText=traceback.format_exc())
def onSaveLabel(self):
start = time.time()
labelmapVolumeNode = None
result = None
self.onClearScribbles()
if self.current_sample.get("session"):
if not self.onUploadImage(init_sample=False):
return
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
segmentationNode = self._segmentNode
labelmapVolumeNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLLabelMapVolumeNode")
slicer.modules.segmentations.logic().ExportVisibleSegmentsToLabelmapNode(
segmentationNode, labelmapVolumeNode, self._volumeNode
)
segmentation = segmentationNode.GetSegmentation()
totalSegments = segmentation.GetNumberOfSegments()
segmentIds = [segmentation.GetNthSegmentID(i) for i in range(totalSegments)]
label_info = []
for idx, segmentId in enumerate(segmentIds):
segment = segmentation.GetSegment(segmentId)
if segment.GetName() in ["foreground_scribbles", "background_scribbles"]:
logging.info(f"Removing segment {segmentId}: {segment.GetName()}")
segmentationNode.RemoveSegment(segmentId)
continue
label_info.append({"name": segment.GetName(), "idx": idx + 1})
# label_info.append({"color": segment.GetColor()})
label_in = tempfile.NamedTemporaryFile(suffix=self.file_ext, dir=self.tmpdir).name
self.reportProgress(5)
if (
slicer.util.settingsValue("MONAILabel/allowOverlappingSegments", True, converter=slicer.util.toBool)
and slicer.util.settingsValue("MONAILabel/fileExtension", self.file_ext) == ".seg.nrrd"
):
slicer.util.saveNode(segmentationNode, label_in)
else:
slicer.util.saveNode(labelmapVolumeNode, label_in)
self.reportProgress(30)
self.updateServerSettings()
result = self.logic.save_label(self.current_sample["id"], label_in, {"label_info": label_info})
self.fetchInfo()
if slicer.util.settingsValue("MONAILabel/autoUpdateModel", True, converter=slicer.util.toBool):
try:
if self.isTrainingRunning(check_only=True):
self.logic.train_stop()
except:
logging.info("Failed to stop training; or already stopped")
self.onTraining()
except:
slicer.util.errorDisplay("Failed to save Label to MONAI Label Server", detailedText=traceback.format_exc())
finally:
qt.QApplication.restoreOverrideCursor()
self.reportProgress(100)
if labelmapVolumeNode:
slicer.mrmlScene.RemoveNode(labelmapVolumeNode)
if result:
slicer.util.infoDisplay(
"Label-Mask saved into MONAI Label Server\t\t", detailedText=json.dumps(result, indent=2)
)
if slicer.util.settingsValue("MONAILabel/autoFetchNextSample", False, converter=slicer.util.toBool):
slicer.mrmlScene.Clear(0)
self.onNextSampleButton()
logging.info("Time consumed by save label: {0:3.1f}".format(time.time() - start))
def getSessionId(self):
session_id = None
if self.current_sample.get("session", False):
session_id = self.current_sample.get("session_id")
if not session_id or not self.logic.get_session(session_id):
self.onUploadImage(init_sample=False, session=True)
session_id = self.current_sample["session_id"]
return session_id
def onClickSegmentation(self):
if not self.current_sample:
return
start = time.time()
result_file = None
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
self.updateServerSettings()
model = self.ui.segmentationModelSelector.currentText
image_file = self.current_sample["id"]
params = self.getParamsFromConfig("infer", model)
result_file, params = self.logic.infer(model, image_file, params, session_id=self.getSessionId())
print(f"Result Params for Segmentation: {params}")
labels = (
params.get("label_names") if params and params.get("label_names") else self.models[model].get("labels")
)
if labels and isinstance(labels, dict):
labels = [k for k, _ in sorted(labels.items(), key=lambda item: item[1])]
self.updateSegmentationMask(result_file, labels)
except:
slicer.util.errorDisplay(
"Failed to run inference in MONAI Label Server", detailedText=traceback.format_exc()
)
finally:
qt.QApplication.restoreOverrideCursor()
if result_file and os.path.exists(result_file):
os.unlink(result_file)
self.updateGUIFromParameterNode()
logging.info("Time consumed by segmentation: {0:3.1f}".format(time.time() - start))
def onUpdateDeepgrow(self):
self.onClickDeepgrow(None)
def onClickDeepgrow(self, current_point, skip_infer=False):
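        # Store the clicked foreground/background points as tags on the current segment and, unless
        # skip_infer is set, run deepgrow/deepedit inference on the server and update the segmentation mask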
model = self.ui.deepgrowModelSelector.currentText
if not model:
slicer.util.warningDisplay("Please select a deepgrow model")
return
_, segment = self.currentSegment()
if not segment:
slicer.util.warningDisplay("Please add the required label to run deepgrow")
return
foreground_all = self.getFiducialPointsXYZ(self.dgPositiveFiducialNode, "foreground")
background_all = self.getFiducialPointsXYZ(self.dgNegativeFiducialNode, "background")
segment.SetTag("MONAILabel.ForegroundPoints", json.dumps(foreground_all))
segment.SetTag("MONAILabel.BackgroundPoints", json.dumps(background_all))
if skip_infer:
return
# use model info "deepgrow" to determine
deepgrow_3d = False if self.models[model].get("dimension", 3) == 2 else True
start = time.time()
label = segment.GetName()
operationDescription = "Run Deepgrow for segment: {}; model: {}; 3d {}".format(label, model, deepgrow_3d)
logging.debug(operationDescription)
if not current_point:
if not foreground_all and not deepgrow_3d:
slicer.util.warningDisplay(operationDescription + " - points not added")
return
current_point = foreground_all[-1] if foreground_all else background_all[-1] if background_all else None
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
sliceIndex = None
if self.multi_label:
params = {}
segmentation = self._segmentNode.GetSegmentation()
for name in self.info.get("labels", []):
points = []
segmentId = segmentation.GetSegmentIdBySegmentName(name)
segment = segmentation.GetSegment(segmentId) if segmentId else None
if segment:
fPosStr = vtk.mutable("")
segment.GetTag("MONAILabel.ForegroundPoints", fPosStr)
pointset = str(fPosStr)
print("{} => {} Fiducial points are: {}".format(segmentId, name, pointset))
if fPosStr is not None and len(pointset) > 0:
points = json.loads(pointset)
params[name] = points
params["label"] = label
labels = None
else:
sliceIndex = current_point[2] if current_point else None
logging.debug("Slice Index: {}".format(sliceIndex))
if deepgrow_3d or not sliceIndex:
foreground = foreground_all
background = background_all
else:
foreground = [x for x in foreground_all if x[2] == sliceIndex]
background = [x for x in background_all if x[2] == sliceIndex]
logging.debug("Foreground: {}".format(foreground))
logging.debug("Background: {}".format(background))
logging.debug("Current point: {}".format(current_point))
params = {
"label": label,
"foreground": foreground,
"background": background,
}
labels = [label]
params["label"] = label
params.update(self.getParamsFromConfig("infer", model))
print(f"Request Params for Deepgrow/Deepedit: {params}")
image_file = self.current_sample["id"]
result_file, params = self.logic.infer(model, image_file, params, session_id=self.getSessionId())
print(f"Result Params for Deepgrow/Deepedit: {params}")
if labels is None:
labels = (
params.get("label_names")
if params and params.get("label_names")
else self.models[model].get("labels")
)
if labels and isinstance(labels, dict):
labels = [k for k, _ in sorted(labels.items(), key=lambda item: item[1])]
freeze = label if self.ui.freezeUpdateCheckBox.checked else None
self.updateSegmentationMask(result_file, labels, None if deepgrow_3d else sliceIndex, freeze=freeze)
except:
logging.exception("Unknown Exception")
slicer.util.errorDisplay(operationDescription + " - unexpected error.", detailedText=traceback.format_exc())
finally:
qt.QApplication.restoreOverrideCursor()
self.updateGUIFromParameterNode()
logging.info("Time consumed by Deepgrow: {0:3.1f}".format(time.time() - start))
def createCursor(self, widget):
return slicer.util.mainWindow().cursor
def createSegmentNode(self):
if self._volumeNode is None:
return
if self._segmentNode is None:
name = "segmentation_" + self._volumeNode.GetName()
self._segmentNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSegmentationNode")
self._segmentNode.SetReferenceImageGeometryParameterFromVolumeNode(self._volumeNode)
self._segmentNode.SetName(name)
def getLabelColor(self, name):
color = GenericAnatomyColors.get(name.lower())
return [c / 255.0 for c in color] if color else None
def updateSegmentationMask(self, in_file, labels, sliceIndex=None, freeze=None):
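        # Import the label file returned by the server into the segmentation node; when in_file is None,
        # only create empty segments for the given labels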
# TODO:: Add ROI Node (for Bounding Box if provided in the result)
start = time.time()
logging.debug("Update Segmentation Mask from: {}".format(in_file))
if in_file and not os.path.exists(in_file):
return False
segmentationNode = self._segmentNode
segmentation = segmentationNode.GetSegmentation()
if in_file is None:
for label in labels:
if not segmentation.GetSegmentIdBySegmentName(label):
segmentation.AddEmptySegment(label, label, self.getLabelColor(label))
return True
labels = [l for l in labels if l != "background"]
print(f"Update Segmentation Mask using Labels: {labels}")
# segmentId, segment = self.currentSegment()
labelImage = sitk.ReadImage(in_file)
labelmapVolumeNode = sitkUtils.PushVolumeToSlicer(labelImage, None, className="vtkMRMLLabelMapVolumeNode")
existing_label_ids = {}
for label in labels:
            segment_id = segmentation.GetSegmentIdBySegmentName(label)
            if segment_id:
                existing_label_ids[label] = segment_id
freeze = [freeze] if freeze and isinstance(freeze, str) else freeze
print(f"Import only Freezed label: {freeze}")
numberOfExistingSegments = segmentation.GetNumberOfSegments()
slicer.modules.segmentations.logic().ImportLabelmapToSegmentationNode(labelmapVolumeNode, segmentationNode)
slicer.mrmlScene.RemoveNode(labelmapVolumeNode)
numberOfAddedSegments = segmentation.GetNumberOfSegments() - numberOfExistingSegments
logging.debug("Adding {} segments".format(numberOfAddedSegments))
addedSegmentIds = [
segmentation.GetNthSegmentID(numberOfExistingSegments + i) for i in range(numberOfAddedSegments)
]
for i, segmentId in enumerate(addedSegmentIds):
segment = segmentation.GetSegment(segmentId)
print("Setting new segmentation with id: {} => {}".format(segmentId, segment.GetName()))
label = labels[i] if i < len(labels) else "unknown {}".format(i)
# segment.SetName(label)
# segment.SetColor(self.getLabelColor(label))
if freeze and label not in freeze:
print(f"Discard label update for: {label}")
elif label in existing_label_ids:
segmentEditorWidget = slicer.modules.segmenteditor.widgetRepresentation().self().editor
segmentEditorWidget.setSegmentationNode(segmentationNode)
segmentEditorWidget.setMasterVolumeNode(self._volumeNode)
segmentEditorWidget.setCurrentSegmentID(existing_label_ids[label])
effect = segmentEditorWidget.effectByName("Logical operators")
labelmap = slicer.vtkOrientedImageData()
segmentationNode.GetBinaryLabelmapRepresentation(segmentId, labelmap)
if sliceIndex:
selectedSegmentLabelmap = effect.selectedSegmentLabelmap()
dims = selectedSegmentLabelmap.GetDimensions()
count = 0
for x in range(dims[0]):
for y in range(dims[1]):
if selectedSegmentLabelmap.GetScalarComponentAsDouble(x, y, sliceIndex, 0):
count = count + 1
selectedSegmentLabelmap.SetScalarComponentFromDouble(x, y, sliceIndex, 0, 0)
logging.debug("Total Non Zero: {}".format(count))
# Clear the Slice
if count:
effect.modifySelectedSegmentByLabelmap(
selectedSegmentLabelmap, slicer.qSlicerSegmentEditorAbstractEffect.ModificationModeSet
)
# Union label map
effect.modifySelectedSegmentByLabelmap(
labelmap, slicer.qSlicerSegmentEditorAbstractEffect.ModificationModeAdd
)
else:
# adding bypass masking to not overwrite other layers,
# needed for preserving scribbles during updates
# help from: https://github.com/Slicer/Slicer/blob/master/Modules/Loadable/Segmentations/EditorEffects/Python/SegmentEditorLogicalEffect.py
bypassMask = True
effect.modifySelectedSegmentByLabelmap(
labelmap, slicer.qSlicerSegmentEditorAbstractEffect.ModificationModeSet, bypassMask
)
segmentationNode.RemoveSegment(segmentId)
self.showSegmentationsIn3D()
logging.info("Time consumed by updateSegmentationMask: {0:3.1f}".format(time.time() - start))
return True
def showSegmentationsIn3D(self):
# add closed surface representation
if self._segmentNode:
self._segmentNode.CreateClosedSurfaceRepresentation()
view = slicer.app.layoutManager().threeDWidget(0).threeDView()
view.resetFocalPoint()
def updateServerUrlGUIFromSettings(self):
# Save current server URL to the top of history
settings = qt.QSettings()
serverUrlHistory = settings.value("MONAILabel/serverUrlHistory")
wasBlocked = self.ui.serverComboBox.blockSignals(True)
self.ui.serverComboBox.clear()
if serverUrlHistory:
self.ui.serverComboBox.addItems(serverUrlHistory.split(";"))
self.ui.serverComboBox.setCurrentText(settings.value("MONAILabel/serverUrl"))
self.ui.serverComboBox.blockSignals(wasBlocked)
def createFiducialNode(self, name, onMarkupNodeModified, color):
displayNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLMarkupsDisplayNode")
displayNode.SetTextScale(0)
displayNode.SetSelectedColor(color)
fiducialNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLMarkupsFiducialNode")
fiducialNode.SetName(name)
fiducialNode.SetAndObserveDisplayNodeID(displayNode.GetID())
fiducialNodeObservers = []
self.addFiducialNodeObserver(fiducialNode, onMarkupNodeModified)
return fiducialNode, fiducialNodeObservers
def removeFiducialNodeObservers(self, fiducialNode, fiducialNodeObservers):
if fiducialNode and fiducialNodeObservers:
for observer in fiducialNodeObservers:
fiducialNode.RemoveObserver(observer)
def addFiducialNodeObserver(self, fiducialNode, onMarkupNodeModified):
fiducialNodeObservers = []
if fiducialNode:
eventIds = [slicer.vtkMRMLMarkupsNode.PointPositionDefinedEvent]
for eventId in eventIds:
fiducialNodeObservers.append(fiducialNode.AddObserver(eventId, onMarkupNodeModified))
return fiducialNodeObservers
def scribblesLayersPresent(self):
scribbles_exist = False
if self._segmentNode is not None:
segmentationNode = self._segmentNode
segmentation = segmentationNode.GetSegmentation()
numSegments = segmentation.GetNumberOfSegments()
segmentIds = [segmentation.GetNthSegmentID(i) for i in range(numSegments)]
scribbles_exist = sum([int("scribbles" in sid) for sid in segmentIds]) > 0
return scribbles_exist
def onStartScribbling(self):
if not self._segmentNode:
return
logging.debug("Scribbles start event")
if (not self.scribblesLayersPresent()) and (self._scribblesEditorWidget is None):
# add background, layer index = -2 [2], color = red
self._segmentNode.GetSegmentation().AddEmptySegment(
"background_scribbles", "background_scribbles", [1.0, 0.0, 0.0]
)
# add foreground, layer index = -1 [3], color = green
self._segmentNode.GetSegmentation().AddEmptySegment(
"foreground_scribbles", "foreground_scribbles", [0.0, 1.0, 0.0]
)
# change segmentation display properties to "see through" the scribbles
# further explanation at:
# https://apidocs.slicer.org/master/classvtkMRMLSegmentationDisplayNode.html
segmentationDisplayNode = self._segmentNode.GetDisplayNode()
# background
opacity = 0.2
segmentationDisplayNode.SetSegmentOpacity2DFill("background_scribbles", opacity)
segmentationDisplayNode.SetSegmentOpacity2DOutline("background_scribbles", opacity)
# foreground
segmentationDisplayNode.SetSegmentOpacity2DFill("foreground_scribbles", opacity)
segmentationDisplayNode.SetSegmentOpacity2DOutline("foreground_scribbles", opacity)
# create segmentEditorWidget to access "Paint" and "Erase" segmentation tools
# these will be used to draw scribbles
self._scribblesEditorWidget = slicer.qMRMLSegmentEditorWidget()
self._scribblesEditorWidget.setMRMLScene(slicer.mrmlScene)
segmentEditorNode = slicer.vtkMRMLSegmentEditorNode()
            # adding new scribbles can overwrite existing labels (one-hot behaviour), which is not
            # desired here, hence we switch to overlay mode that enables drawing scribbles
            # without changing existing labels. Further explanation at:
            # https://discourse.slicer.org/t/how-can-i-set-masking-settings-on-a-segment-editor-effect-in-python/4406/7
segmentEditorNode.SetOverwriteMode(slicer.vtkMRMLSegmentEditorNode.OverwriteNone)
# add all nodes to the widget
slicer.mrmlScene.AddNode(segmentEditorNode)
self._scribblesEditorWidget.setMRMLSegmentEditorNode(segmentEditorNode)
self._scribblesEditorWidget.setSegmentationNode(self._segmentNode)
self._scribblesEditorWidget.setMasterVolumeNode(self._volumeNode)
def onUpdateScribbles(self):
logging.info("Scribbles update event")
scribblesMethod = self.ui.scribblesMethodSelector.currentText
scribbles_in = None
result_file = None
try:
qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
# get scribbles + label
segmentationNode = self._segmentNode
labelmapVolumeNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLLabelMapVolumeNode")
slicer.modules.segmentations.logic().ExportVisibleSegmentsToLabelmapNode(
segmentationNode, labelmapVolumeNode, self._volumeNode
)
scribbles_in = tempfile.NamedTemporaryFile(suffix=self.file_ext, dir=self.tmpdir).name
self.reportProgress(5)
# save scribbles + label to file
slicer.util.saveNode(labelmapVolumeNode, scribbles_in)
self.reportProgress(30)
self.updateServerSettings()
self.reportProgress(60)
# try to first fetch vtkMRMLAnnotationROINode
roiNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLAnnotationROINode")
            if roiNode is None:  # if vtkMRMLAnnotationROINode not present, then check for vtkMRMLMarkupsROINode node
roiNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLMarkupsROINode")
# if roi node found, then try to get roi
selected_roi = self.getROIPointsXYZ(roiNode)
# send scribbles + label to server along with selected scribbles method
params = self.getParamsFromConfig("infer", scribblesMethod)
params.update({"roi": selected_roi})
image_file = self.current_sample["id"]
result_file, params = self.logic.infer(
scribblesMethod, image_file, params, scribbles_in, session_id=self.getSessionId()
)
# display result from server
self.reportProgress(90)
_, segment = self.currentSegment()
label = segment.GetName()
self.updateSegmentationMask(result_file, [label])
except:
slicer.util.errorDisplay(
"Failed to post process label on MONAI Label Server using {}".format(scribblesMethod),
detailedText=traceback.format_exc(),
)
finally:
qt.QApplication.restoreOverrideCursor()
self.reportProgress(100)
# clear all temporary files
if scribbles_in and os.path.exists(scribbles_in):
os.unlink(scribbles_in)
if result_file and os.path.exists(result_file):
os.unlink(result_file)
def getROIPointsXYZ(self, roiNode):
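        # Convert the ROI bounds (Markups ROI or legacy Annotation ROI) from RAS to IJK coordinates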
        if roiNode is None:
return []
v = self._volumeNode
RasToIjkMatrix = vtk.vtkMatrix4x4()
v.GetRASToIJKMatrix(RasToIjkMatrix)
roi_points_ras = [0.0] * 6
if roiNode.__class__.__name__ == "vtkMRMLMarkupsROINode":
# for vtkMRMLMarkupsROINode
print(roiNode.__class__.__name__)
center = [0] * 3
roiNode.GetCenter(center)
roi_points_ras = [(x - s / 2, x + s / 2) for x, s in zip(center, roiNode.GetSize())]
roi_points_ras = [item for sublist in roi_points_ras for item in sublist]
elif roiNode.__class__.__name__ == "vtkMRMLAnnotationROINode":
# for vtkMRMLAnnotationROINode (old method)
print(roiNode.__class__.__name__)
roiNode.GetBounds(roi_points_ras)
else:
# if none found then best to return empty list
return []
min_points_ras = [roi_points_ras[0], roi_points_ras[2], roi_points_ras[4], 1.0]
max_points_ras = [roi_points_ras[0 + 1], roi_points_ras[2 + 1], roi_points_ras[4 + 1], 1.0]
min_points_ijk = RasToIjkMatrix.MultiplyDoublePoint(min_points_ras)
max_points_ijk = RasToIjkMatrix.MultiplyDoublePoint(max_points_ras)
min_points_ijk = [round(i) for i in min_points_ijk]
max_points_ijk = [round(i) for i in max_points_ijk]
roi_points_ijk = [val for pair in zip(min_points_ijk[0:3], max_points_ijk[0:3]) for val in pair]
logging.debug("RAS: {}; IJK: {}".format(roi_points_ras, roi_points_ijk))
# print("RAS: {}; IJK: {}".format(roi_points_ras, roi_points_ijk))
return roi_points_ijk
def onClearScribblesSegmentNodes(self):
# more explanation on this at:
# https://discourse.slicer.org/t/how-to-clear-segmentation/7433/4
# clear "scribbles" segment before saving the label
if not self._segmentNode:
return
segmentation = self._segmentNode
num_segments = segmentation.GetSegmentation().GetNumberOfSegments()
for i in range(num_segments):
segmentId = segmentation.GetSegmentation().GetNthSegmentID(i)
if "scribbles" in segmentId:
logging.info("clearning {}".format(segmentId))
labelMapRep = slicer.vtkOrientedImageData()
segmentation.GetBinaryLabelmapRepresentation(segmentId, labelMapRep)
vtkSegmentationCore.vtkOrientedImageDataResample.FillImage(labelMapRep, 0, labelMapRep.GetExtent())
slicer.vtkSlicerSegmentationsModuleLogic.SetBinaryLabelmapToSegment(
labelMapRep, segmentation, segmentId, slicer.vtkSlicerSegmentationsModuleLogic.MODE_REPLACE
)
def onClearScribbles(self):
# reset scribbles mode
self.scribblesMode = None
# clear scribbles editor widget
if self._scribblesEditorWidget:
widget = self._scribblesEditorWidget
del widget
self._scribblesEditorWidget = None
# remove "scribbles" segments from label
self.onClearScribblesSegmentNodes()
# reset UI elements associated with scribbles
self.ui.scribblesCollapsibleButton.collapsed = True
self.ui.paintScribblesButton.setChecked(False)
self.ui.eraseScribblesButton.setChecked(False)
self.ui.scribblesLabelSelector.setCurrentIndex(0)
def checkAndInitialiseScribbles(self):
if not self._segmentNode:
return
if self._scribblesEditorWidget is None:
self.onStartScribbling()
if self.scribblesMode is None:
self.changeScribblesMode(tool="Paint", layer="foreground_scribbles")
self.updateScribToolLayerFromMode()
def updateScribToolLayerFromMode(self):
if not self._segmentNode:
return
logging.info("Scribbles mode {} ".format(self.scribblesMode))
self.checkAndInitialiseScribbles()
# update tool/layer select for scribblesEditorWidget
tool, layer = self.getToolAndLayerFromScribblesMode()
if self._scribblesEditorWidget:
self._scribblesEditorWidget.setActiveEffectByName(tool)
self._scribblesEditorWidget.setCurrentSegmentID(layer)
# update brush type from checkbox
if tool in ("Paint", "Erase"):
is3dbrush = self.ui.brush3dCheckbox.checkState()
self.on3dBrushCheckbox(state=is3dbrush)
# update brush size from slider
brushSize = self.ui.brushSizeSlider.value
self.updateBrushSize(value=brushSize)
def getToolAndLayerFromScribblesMode(self):
if self.scribblesMode is not None:
return self.scribblesMode.split("+")
else:
# default modes
return "Paint", "foreground_scribbles"
def changeScribblesMode(self, tool=None, layer=None):
ctool, clayer = self.getToolAndLayerFromScribblesMode()
ctool = tool if tool is not None else ctool
clayer = layer if layer is not None else clayer
self.scribblesMode = "+".join([ctool, clayer])
def onPaintScribbles(self):
if not self._segmentNode:
return
if self.ui.eraseScribblesButton.checked:
self.ui.eraseScribblesButton.setChecked(False)
self.changeScribblesMode(tool="Paint" if self.ui.paintScribblesButton.checked else "None")
self.updateScribToolLayerFromMode()
def onEraseScribbles(self):
if not self._segmentNode:
return
if self.ui.paintScribblesButton.checked:
self.ui.paintScribblesButton.setChecked(False)
self.changeScribblesMode(tool="Erase" if self.ui.eraseScribblesButton.checked else "None")
self.updateScribToolLayerFromMode()
def onSelectScribblesLabel(self):
if not self._segmentNode:
return
index = self.ui.scribblesLabelSelector.currentIndex
index = 0 if index < 0 else index
selected = self.ui.scribblesLabelSelector.itemText(index)
layer = "foreground_scribbles" if selected == "Foreground" else "background_scribbles"
self.changeScribblesMode(layer=layer)
self.updateScribToolLayerFromMode()
def on3dBrushCheckbox(self, state):
logging.info("3D brush update {}".format(state))
self.checkAndInitialiseScribbles()
effect = self._scribblesEditorWidget.activeEffect()
# enable scribbles in 3d using a sphere brush
effect.setParameter("BrushSphere", state)
def updateBrushSize(self, value):
logging.info("brush size update {}".format(value))
if self.ui.paintScribblesButton.checked or self.ui.eraseScribblesButton.checked:
self.checkAndInitialiseScribbles()
effect = self._scribblesEditorWidget.activeEffect()
effect.setParameter("BrushAbsoluteDiameter", value)
class MONAILabelLogic(ScriptedLoadableModuleLogic):
def __init__(self, tmpdir=None, server_url=None, progress_callback=None, client_id=None):
ScriptedLoadableModuleLogic.__init__(self)
self.server_url = server_url
self.tmpdir = slicer.util.tempDirectory("slicer-monai-label") if tmpdir is None else tmpdir
self.client_id = client_id
self.volumeToSessions = dict()
self.progress_callback = progress_callback
def setDefaultParameters(self, parameterNode):
if not parameterNode.GetParameter("SegmentationModel"):
parameterNode.SetParameter("SegmentationModel", "")
if not parameterNode.GetParameter("DeepgrowModel"):
parameterNode.SetParameter("DeepgrowModel", "")
if not parameterNode.GetParameter("ScribblesMethod"):
parameterNode.SetParameter("ScribblesMethod", "")
def __del__(self):
shutil.rmtree(self.tmpdir, ignore_errors=True)
def setServer(self, server_url=None):
self.server_url = server_url if server_url else "http://127.0.0.1:8000"
def setClientId(self, client_id):
self.client_id = client_id if client_id else "user-xyz"
def setProgressCallback(self, progress_callback=None):
self.progress_callback = progress_callback
def reportProgress(self, progress):
if self.progress_callback:
self.progress_callback(progress)
def info(self):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).info()
def next_sample(self, strategy, params={}):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).next_sample(strategy, params)
def create_session(self, image_in):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).create_session(image_in)
def get_session(self, session_id):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).get_session(session_id)
def remove_session(self, session_id):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).remove_session(session_id)
def upload_image(self, image_in, image_id=None):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).upload_image(image_in, image_id)
def save_label(self, image_in, label_in, params):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).save_label(
image_in, label_in, params=params
)
def infer(self, model, image_in, params={}, label_in=None, file=None, session_id=None):
logging.debug("Preparing input data for segmentation")
self.reportProgress(0)
client = MONAILabelClient(self.server_url, self.tmpdir, self.client_id)
result_file, params = client.infer(model, image_in, params, label_in, file, session_id)
logging.debug(f"Image Response: {result_file}")
logging.debug(f"JSON Response: {params}")
self.reportProgress(100)
return result_file, params
def train_start(self, model=None, params={}):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).train_start(model, params)
def train_status(self, check_if_running):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).train_status(check_if_running)
def train_stop(self):
return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).train_stop()
class MONAILabelTest(ScriptedLoadableModuleTest):
def setUp(self):
slicer.mrmlScene.Clear()
def runTest(self):
self.setUp()
self.test_MONAILabel1()
def test_MONAILabel1(self):
self.delayDisplay("Test passed")
|
'''
Problem 017
If the numbers 1 to 5 are written out in words: one, two, three, four, five,
then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.
If all the numbers from 1 to 1000 (one thousand) inclusive were written out in
words, how many letters would be used?
NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and
forty-two) contains 23 letters and 115 (one hundred and fifteen) contains 20
letters. The use of "and" when writing out numbers is in compliance with
British usage.
Solution: Copyright 2017 Dave Cuthbert, MIT License
'''
ones_names = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',
'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen',
'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen',
'nineteen']
tens_names = ['zero', 'ten', 'twenty', 'thirty', 'forty', 'fifty', 'sixty',
'seventy', 'eighty', 'ninety']
def build_words(n):
if n == 1000:
return 'one' + 'thousand'
elif n > 99:
hundreds = ones_names[int(n / 100)] + 'hundred'
n = n % 100
if n == 0:
return hundreds
return hundreds + 'and' + build_words(n)
elif n > 19:
tens = tens_names[int(n / 10)]
n = n % 10
if n == 0:
return tens
return tens + ones_names[n]
else:
return ones_names[n]
def solve_problem():
total_letters = 0
for n in range(1,1001):
total_letters += len(build_words(n))
return total_letters
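# Hedged sanity check against the examples quoted in the problem statement above:
# 342 ("three hundred and forty-two") uses 23 letters and 115 ("one hundred and
# fifteen") uses 20. build_words omits spaces and hyphens, so the lengths match directly.
assert len(build_words(342)) == 23
assert len(build_words(115)) == 20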
if __name__ == "__main__":
print(solve_problem())
|
import _plotly_utils.basevalidators
class TypesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="typesrc", parent_name="scattergeo.marker.gradient", **kwargs
):
super(TypesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class TypeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="type", parent_name="scattergeo.marker.gradient", **kwargs
):
super(TypeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["radial", "horizontal", "vertical", "none"]),
**kwargs
)
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="colorsrc", parent_name="scattergeo.marker.gradient", **kwargs
):
super(ColorsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="color", parent_name="scattergeo.marker.gradient", **kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
from dataclasses import dataclass, field
from datetime import datetime
from typing import Optional
from .cloud_provider import CloudProvider
from .instance import Instance
from .instance_status import InstanceStatus
@dataclass
class AwsInstance(Instance):
"""Extends Instance to add fields specific to the AWS compute sources."""
type: str = field(default="co.yellowdog.platform.model.AwsInstance", init=False)
id: Optional[str] = field(default=None, init=False)
"""The ID of this instance."""
instanceLifecycle: Optional[str] = None
"""The AWS EC2 instance lifecycle value for this instance."""
createdTime: Optional[datetime] = None
"""The date and time when this instance was first created."""
sourceId: Optional[str] = None
"""The ID of the compute source from which this instance was provisioned."""
imageId: Optional[str] = None
"""The machine image ID used for this instance."""
instanceType: Optional[str] = None
"""The machine type of this instance."""
provider: Optional[CloudProvider] = None
"""The cloud provider that supplies this instance."""
region: Optional[str] = None
"""The region where this instance is provisioned."""
status: Optional[InstanceStatus] = None
"""The status of this instance."""
subregion: Optional[str] = None
"""The subregion where this instance is provisioned."""
privateIpAddress: Optional[str] = None
"""The private IP address of this instance."""
publicIpAddress: Optional[str] = None
"""The public IP address of this instance."""
hostname: Optional[str] = None
"""The hostname of this instance."""
|
"""Basic PDFs are provided here.
Gauss, exponential... that can be used together with Functors to build larger models.
"""
# Copyright (c) 2021 zfit
import contextlib
import numpy as np
import tensorflow as tf
import zfit.z.numpy as znp
from zfit import z
from ..core.basepdf import BasePDF
from ..core.space import ANY_LOWER, ANY_UPPER, Space
from ..util import ztyping
from ..util.exception import (AnalyticIntegralNotImplemented,
BreakingAPIChangeError)
from ..util.warnings import warn_advanced_feature
class Exponential(BasePDF):
_N_OBS = 1
def __init__(self, lam=None, obs: ztyping.ObsTypeInput = None, name: str = "Exponential", lambda_=None):
"""Exponential function exp(lambda * x).
The function is normalized over a finite range and therefore a pdf. So the PDF is precisely
defined as :math:`\\frac{ e^{\\lambda \\cdot x}}{ \\int_{lower}^{upper} e^{\\lambda \\cdot x} dx}`
Args:
lam: Accessed as parameter "lambda".
obs: The :py:class:`~zfit.Space` the pdf is defined in.
name: Name of the pdf.
dtype:
"""
if lambda_ is not None:
if lam is None:
lam = lambda_
else:
raise BreakingAPIChangeError("The 'lambda' parameter has been renamed from 'lambda_' to 'lam'.")
params = {'lambda': lam}
super().__init__(obs, name=name, params=params)
self._calc_numerics_data_shift = lambda: z.constant(0.)
if not self.space.has_limits:
warn_advanced_feature("Exponential pdf relies on a shift of the input towards 0 to keep the numerical "
f"stability high. The space {self.space} does not have limits set and no shift"
f" will occure. To set it manually, set _numerics_data_shift to the expected"
f" average values given to this function _in case you want things to be set_."
f"If this sounds unfamiliar, regard this as an error and use a normalization range.",
identifier='exp_shift')
self._set_numerics_data_shift(self.space)
def _unnormalized_pdf(self, x):
lambda_ = self.params['lambda']
x = x.unstack_x()
probs = znp.exp(lambda_ * (self._shift_x(x)))
tf.debugging.assert_all_finite(probs, f"Exponential PDF {self} has non valid values. This is likely caused"
f" by numerical problems: if the exponential is too steep, this will"
f" yield NaNs or infs. Make sure that your lambda is small enough and/or"
f" the initial space is in the same"
f" region as your data (and norm_range, if explicitly set differently)."
f" If this issue still persists, please oben an issue on Github:"
f" https://github.com/zfit/zfit")
return probs # Don't use exp! will overflow.
def _shift_x(self, x):
return x - self._calc_numerics_data_shift()
@contextlib.contextmanager
def _set_numerics_data_shift(self, limits):
if limits:
def calc_numerics_data_shift():
lower, upper = [], []
for limit in limits:
low, up = limit.rect_limits
lower.append(z.convert_to_tensor(low[:, 0]))
upper.append(z.convert_to_tensor(up[:, 0]))
lower = z.convert_to_tensor(lower)
upper = z.convert_to_tensor(upper)
lower_val = znp.min(lower, axis=0)
upper_val = znp.max(upper, axis=0)
return (upper_val + lower_val) / 2
old_value = self._calc_numerics_data_shift
self._calc_numerics_data_shift = calc_numerics_data_shift
yield
self._calc_numerics_data_shift = old_value
else:
yield
# All hooks are needed to set the right shift when "entering" the pdf. The norm range is taken where both are
# available. No special care needs to be taken for sampling (it samples from the correct region, the limits, and
# uses the predictions by the `unnormalized_prob` -> that is shifted correctly
def _single_hook_integrate(self, limits, norm_range, x):
with self._set_numerics_data_shift(norm_range):
return super()._single_hook_integrate(limits, norm_range, x=x)
def _single_hook_analytic_integrate(self, limits, norm_range):
with self._set_numerics_data_shift(limits=norm_range):
return super()._single_hook_analytic_integrate(limits, norm_range)
def _single_hook_numeric_integrate(self, limits, norm_range):
with self._set_numerics_data_shift(limits=norm_range):
return super()._single_hook_numeric_integrate(limits, norm_range)
def _single_hook_partial_integrate(self, x, limits, norm_range):
with self._set_numerics_data_shift(limits=norm_range):
return super()._single_hook_partial_integrate(x, limits, norm_range)
def _single_hook_partial_analytic_integrate(self, x, limits, norm_range):
with self._set_numerics_data_shift(limits=norm_range):
return super()._single_hook_partial_analytic_integrate(x, limits, norm_range)
def _single_hook_partial_numeric_integrate(self, x, limits, norm_range):
with self._set_numerics_data_shift(limits=norm_range):
return super()._single_hook_partial_numeric_integrate(x, limits, norm_range)
# def _single_hook_normalization(self, limits):
# with self._set_numerics_data_shift(limits=limits):
# return super()._single_hook_normalization(limits)
#
# # TODO: remove component_norm_range? But needed for integral?
# def _single_hook_unnormalized_pdf(self, x, name):
# if component_norm_range.limits_are_false:
# component_norm_range = self.space
# if component_norm_range.limits_are_set:
# with self._set_numerics_data_shift(limits=component_norm_range):
# return super()._single_hook_unnormalized_pdf(x, name)
# else:
# return super()._single_hook_unnormalized_pdf(x, name)
#
def _single_hook_pdf(self, x, norm_range):
with self._set_numerics_data_shift(limits=norm_range):
return super()._single_hook_pdf(x, norm_range)
#
def _single_hook_log_pdf(self, x, norm_range):
with self._set_numerics_data_shift(limits=norm_range):
return super()._single_hook_log_pdf(x, norm_range)
def _single_hook_sample(self, n, limits, x=None):
with self._set_numerics_data_shift(limits=limits):
return super()._single_hook_sample(n, limits, x)
def _exp_integral_from_any_to_any(limits, params, model):
lambda_ = params['lambda']
lower, upper = limits.rect_limits
# if any(np.isinf([lower, upper])):
# raise AnalyticIntegralNotImplemented
integral = _exp_integral_func_shifting(lambd=lambda_, lower=lower, upper=upper, model=model)
return integral[0]
def _exp_integral_func_shifting(lambd, lower, upper, model):
def raw_integral(x):
return z.exp(lambd * (model._shift_x(x))) / lambd # needed due to overflow in exp otherwise
lower_int = raw_integral(x=lower)
upper_int = raw_integral(x=upper)
integral = (upper_int - lower_int)
return integral
def exp_icdf(x, params, model):
lambd = params['lambda']
x = z.unstack_x(x)
x = model._shift_x(x)
return znp.log(lambd * x) / lambd
# Exponential.register_inverse_analytic_integral(exp_icdf) # TODO: register icdf for exponential
# TODO: cleanup, make cdf registrable _and_ inverse integral, but real
limits = Space(axes=0, limits=(ANY_LOWER, ANY_UPPER))
Exponential.register_analytic_integral(func=_exp_integral_from_any_to_any, limits=limits)
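# Hedged usage sketch (not part of the library code above): build an Exponential pdf
# over a finite space and evaluate it. The exact top-level API used here
# (zfit.Space, zfit.Parameter, BasePDF.pdf) is assumed to match the zfit version this
# module targets; treat the snippet as illustrative only.
if __name__ == "__main__":
    import zfit

    obs_example = zfit.Space("x", limits=(0, 10))         # finite limits -> pdf is normalizable
    lam_example = zfit.Parameter("lambda_example", -0.5)  # hypothetical parameter value
    exp_pdf = Exponential(lam=lam_example, obs=obs_example)
    print(exp_pdf.pdf([0.5, 1.0, 5.0]))                   # normalized densities over [0, 10]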
|
import os
import math
'''
Metric goal is reached
'''
#System settings
dev_mode = False
grid_data_folder = os.path.join(os.getcwd(), 'raw_data_generation', 'input')
raw_data_folder = os.path.join(os.getcwd(), 'raw_data')
datasets_folder = os.path.join(os.getcwd(), 'datasets')
test_data_folder = os.path.join(os.getcwd(), 'test')
models_folder = os.path.join(os.getcwd(), 'models')
local_machine_tz = 'Europe/Berlin' # timezone; it's important for Powerfactory
#Deep learning settings
learning_config = {
"dataset": "PV_noPV_7day_20k",
"RNN model settings": [1, 2, 6, 2], # number of input features, number of output features, number of features in hidden state, number of of layers
"number of epochs": 100,
"learning rate": 1*10**-6,
"activation function": 'tanh', # relu, tanh
"mini batch size": 60,
"optimizer": 'Adam', # Adam, SGD
"k folds": 5, #choose 1 to not do crossval
"cross_validation": True,
"early stopping": False,
"LR adjustment": 'LR controlled', #None, 'warm up' , 'LR controlled'
"percentage of epochs for warm up": 10, #warm up not performed if percentage of epochs for warm up * epochs > epochs
"train test split": 0.2, #if int, used as number up testing examples; if float, used as share of data
"baseline": False,
"metrics": ['accuracy', 'precision_macro', 'recall_macro', 'f1_macro'],
"cross_val_metrics": ['fit_time', 'test_accuracy', 'test_precision_macro', 'test_recall_macro', 'test_f1_macro'],
"plot samples": True,
"classifier": "RNN" # RNN
}
#########################################################################
### only change if new dataset or raw data should be created ###
#########################################################################
# Dataset settings
raw_data_set_name = 'PV_noPV' #'malfunctions_in_LV_grid_dataset', 'PV_noPV', dummy
dataset_available = False #set to False to recreate instances from raw data
raw_data_available = True #set to False to generate raw data using the simulation; leave True if DIgSILENT PowerFactory is not available
add_data = True #raw_data_available = False has to be set for this! set add_data = True to add more data to the existing raw data;
add_noise = False
accuracy = 0.01 #accuracy according to the Genauigkeitsklasse (accuracy class) of the smart meter (e.g. 1 = 1%)
sample_length = 7 * 96 #96 datapoints per day
smartmeter_ratedvoltage_range = [400, 415]
smartmeter_voltage_range = [363, 457]
number_of_samples = 20000
share_of_positive_samples = 0.5 #should be 0.5! only choose values whose inverse is an integer, i.e. 0.2, 0.25, 0.5 > otherwise the number of samples gets corrupted
number_of_grids = len([i for i in os.listdir(grid_data_folder) if os.path.isdir(os.path.join(grid_data_folder, i))])
float_decimal = 5 #decimals in dataset
#Powerfactory settings
user = 'FellnerD'
system_language = 0 #choose 0 for English, 1 for German, according to the language of the PowerFactory installation on the system
parallel_computing = True
cores = 12 #cores to be used for parallel computing (when 64 available use 12 - 24)
reduce_result_file_size = True #save results as integers to save memory in csv
just_voltages = True #if False also P and Q results given
# Simulation settings
sim_length = 365 #simulation length in days (has to be equal to or larger than the sample length)
if sim_length < sample_length/96: print('Choose different simulation length or sample length (sim_length >= sample_length)')
if raw_data_set_name == 'PV_noPV':
positive_samples_per_simrun = 5 #minimum number of terminals in the grid that provide data > determines how many yearly simulations have to be run and used for dataset creation
simruns = math.ceil((number_of_samples * share_of_positive_samples) / (positive_samples_per_simrun * number_of_grids) / (sim_length * 96/sample_length))
elif raw_data_set_name == 'malfunctions_in_LV_grid_dataset':
simruns = math.ceil((number_of_samples * share_of_positive_samples) / (number_of_grids) / int(
(sim_length * 96 / sample_length) - 1))
elif raw_data_set_name == 'dummy':
terms_per_simrun = 5 #minimum number of terminals in the grid that provide data > determines how many yearly simulations have to be run and used for dataset creation
simruns = math.ceil((number_of_samples * share_of_positive_samples) / (terms_per_simrun * number_of_grids) / (sim_length * 96/sample_length))
else:
simruns = 10 #number of datasets produced and also used per grid (location of malfunction/PVs... is varied)
step_size = 15 #simulation step size in minutes
percentage = 25 #percentage of busses with active PVs (PV proliferation)
control_curve_choice = 0 #for all PVs: choose control curve for 0 = cos(phi)(P), 1 = Q(P), 2 = brokenQ(P) (flat curve)
broken_control_curve_choice = 3 #for broken PV: choose control curve for 0 = cos(phi)(P), 1 = Q(P), 2 = broken Q(P) (flat curve), 3 = wrong Q(P) (inversed curve)
number_of_broken_devices = 1 #define number of devices to experience malfunctions during simulation
load_scaling = 100 #general load scaling for all loads in simulation (does not apply to setup)
generation_scaling = 100 #general generation scaling for all generation units in simulation (does not apply to setup)
whole_year = True #if True malfunction is present from start of simulation on; if False malfunction is at random point
t_start = None #default(None): times inferred from profiles in data
t_end = None
#t_start = pd.Timestamp('2017-01-01 00:00:00', tz='utc') # example for custom sim time
#t_end = pd.Timestamp('2018-01-01 00:00:00', tz='utc') - pd.Timedelta(step_size + 'T')
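# Illustrative, standalone recomputation of the PV_noPV 'simruns' formula above.
# number_of_grids normally comes from the contents of grid_data_folder; a hypothetical
# value of 10 grids is assumed here purely for the example.
if raw_data_set_name == 'PV_noPV':
    _example_grids = 10
    _example_simruns = math.ceil(
        (number_of_samples * share_of_positive_samples)
        / (positive_samples_per_simrun * _example_grids)
        / (sim_length * 96 / sample_length))
    # 20000 * 0.5 / (5 * 10) = 200, divided by 365 * 96 / 672 ≈ 52.1 sample windows
    # per yearly run -> ceil(3.84) = 4 simruns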
|
#!/usr/bin/env python3
"""
Check lesson files and their contents.
"""
import os
import glob
import re
from argparse import ArgumentParser
from util import (Reporter, read_markdown, load_yaml, check_unwanted_files,
require)
__version__ = '0.3'
# Where to look for source Markdown files.
SOURCE_DIRS = ['', '_episodes', '_extras']
# Where to look for source Rmd files.
SOURCE_RMD_DIRS = ['_episodes_rmd']
# Required files: each entry is ('path': YAML_required).
# FIXME: We do not yet validate whether any files have the required
# YAML headers, but should in the future.
# The '%' is replaced with the source directory path for checking.
# Episodes are handled specially, and extra files in '_extras' are also handled
# specially. This list must include all the Markdown files listed in the
# 'bin/initialize' script.
REQUIRED_FILES = {
'%/CODE_OF_CONDUCT.md': True,
'%/CONTRIBUTING.md': False,
'%/LICENSE.md': True,
'%/README.md': False,
'%/_extras/discuss.md': True,
'%/_extras/guide.md': True,
'%/index.md': True,
'%/reference.md': True,
'%/setup.md': True,
}
# Episode filename pattern.
P_EPISODE_FILENAME = re.compile(r'/_episodes/(\d\d)-[-\w]+.md$')
# Pattern to match lines ending with whitespace.
P_TRAILING_WHITESPACE = re.compile(r'\s+$')
# Pattern to match figure references in HTML.
P_FIGURE_REFS = re.compile(r'<img[^>]+src="([^"]+)"[^>]*>')
# Pattern to match internally-defined Markdown links.
P_INTERNAL_LINK_REF = re.compile(r'\[([^\]]+)\]\[([^\]]+)\]')
# Pattern to match reference links (to resolve internally-defined references).
P_INTERNAL_LINK_DEF = re.compile(r'^\[([^\]]+)\]:\s*(.+)')
# Pattern to match {% include ... %} statements
P_INTERNAL_INCLUDE_LINK = re.compile(r'^{% include ([^ ]*) %}$')
# What kinds of blockquotes are allowed?
KNOWN_BLOCKQUOTES = {
'callout',
'challenge',
'checklist',
'discussion',
'keypoints',
'objectives',
'prereq',
'quotation',
'solution',
'testimonial'
}
# What kinds of code fragments are allowed?
KNOWN_CODEBLOCKS = {
'error',
'output',
'source',
'language-bash',
'html',
'language-make',
'language-matlab',
'language-python',
'language-r',
'language-shell',
'language-sql'
}
# What fields are required in teaching episode metadata?
TEACHING_METADATA_FIELDS = {
('title', str),
('teaching', int),
('exercises', int),
('questions', list),
('objectives', list),
('keypoints', list)
}
# What fields are required in break episode metadata?
BREAK_METADATA_FIELDS = {
('layout', str),
('title', str),
('break', int)
}
# How long are lines allowed to be?
# Please keep this in sync with .editorconfig!
MAX_LINE_LEN = 100
def main():
"""Main driver."""
args = parse_args()
args.reporter = Reporter()
check_config(args.reporter, args.source_dir)
check_source_rmd(args.reporter, args.source_dir, args.parser)
args.references = read_references(args.reporter, args.reference_path)
docs = read_all_markdown(args.source_dir, args.parser)
check_fileset(args.source_dir, args.reporter, list(docs.keys()))
check_unwanted_files(args.source_dir, args.reporter)
for filename in list(docs.keys()):
checker = create_checker(args, filename, docs[filename])
checker.check()
args.reporter.report()
if args.reporter.messages and not args.permissive:
exit(1)
def parse_args():
"""Parse command-line arguments."""
parser = ArgumentParser(description="""Check episode files in a lesson.""")
parser.add_argument('-l', '--linelen',
default=False,
action="store_true",
dest='line_lengths',
help='Check line lengths')
parser.add_argument('-p', '--parser',
default=None,
dest='parser',
help='path to Markdown parser')
parser.add_argument('-r', '--references',
default=None,
dest='reference_path',
help='path to Markdown file of external references')
parser.add_argument('-s', '--source',
default=os.curdir,
dest='source_dir',
help='source directory')
parser.add_argument('-w', '--whitespace',
default=False,
action="store_true",
dest='trailing_whitespace',
help='Check for trailing whitespace')
parser.add_argument('--permissive',
default=False,
action="store_true",
dest='permissive',
help='Do not raise an error even if issues are detected')
args, extras = parser.parse_known_args()
require(args.parser is not None,
'Path to Markdown parser not provided')
require(not extras,
'Unexpected trailing command-line arguments "{0}"'.format(extras))
return args
def check_config(reporter, source_dir):
"""Check configuration file."""
config_file = os.path.join(source_dir, '_config.yml')
config = load_yaml(config_file)
reporter.check_field(config_file, 'configuration',
config, 'kind', 'lesson')
reporter.check_field(config_file, 'configuration',
config, 'carpentry', ('swc', 'dc', 'lc', 'cp'))
reporter.check_field(config_file, 'configuration', config, 'title')
reporter.check_field(config_file, 'configuration', config, 'email')
for defaults in [
{'values': {'root': '.', 'layout': 'page'}},
{'values': {'root': '..', 'layout': 'episode'}, 'scope': {'type': 'episodes', 'path': ''}},
{'values': {'root': '..', 'layout': 'page'}, 'scope': {'type': 'extras', 'path': ''}}
]:
reporter.check(defaults in config.get('defaults', []),
'configuration',
'"root" not set to "." in configuration')
def check_source_rmd(reporter, source_dir, parser):
"""Check that Rmd episode files include `source: Rmd`"""
episode_rmd_dir = [os.path.join(source_dir, d) for d in SOURCE_RMD_DIRS]
episode_rmd_files = [os.path.join(d, '*.Rmd') for d in episode_rmd_dir]
results = {}
for pat in episode_rmd_files:
for f in glob.glob(pat):
data = read_markdown(parser, f)
dy = data['metadata']
if dy:
reporter.check_field(f, 'episode_rmd',
dy, 'source', 'Rmd')
def read_references(reporter, ref_path):
"""Read shared file of reference links, returning dictionary of valid references
{symbolic_name : URL}
"""
if not ref_path:
raise Warning("No filename has been provided.")
result = {}
urls_seen = set()
with open(ref_path, 'r') as reader:
for (num, line) in enumerate(reader, 1):
if P_INTERNAL_INCLUDE_LINK.search(line): continue
m = P_INTERNAL_LINK_DEF.search(line)
message = '{}: {} not a valid reference: {}'
require(m, message.format(ref_path, num, line.rstrip()))
name = m.group(1)
url = m.group(2)
message = 'Empty reference at {0}:{1}'
require(name, message.format(ref_path, num))
unique_name = name not in result
unique_url = url not in urls_seen
reporter.check(unique_name,
ref_path,
'Duplicate reference name {0} at line {1}',
name, num)
reporter.check(unique_url,
ref_path,
'Duplicate definition of URL {0} at line {1}',
url, num)
result[name] = url
urls_seen.add(url)
return result
def read_all_markdown(source_dir, parser):
"""Read source files, returning
{path : {'metadata':yaml, 'metadata_len':N, 'text':text, 'lines':[(i, line, len)], 'doc':doc}}
"""
all_dirs = [os.path.join(source_dir, d) for d in SOURCE_DIRS]
all_patterns = [os.path.join(d, '*.md') for d in all_dirs]
result = {}
for pat in all_patterns:
for filename in glob.glob(pat):
data = read_markdown(parser, filename)
if data:
result[filename] = data
return result
def check_fileset(source_dir, reporter, filenames_present):
"""Are all required files present? Are extraneous files present?"""
# Check files with predictable names.
required = [p.replace('%', source_dir) for p in REQUIRED_FILES]
missing = set(required) - set(filenames_present)
for m in missing:
reporter.add(None, 'Missing required file {0}', m)
# Check episode files' names.
seen = []
for filename in filenames_present:
if '_episodes' not in filename:
continue
m = P_EPISODE_FILENAME.search(filename)
if m and m.group(1):
seen.append(m.group(1))
else:
reporter.add(
None, 'Episode {0} has badly-formatted filename', filename)
# Check for duplicate episode numbers.
reporter.check(len(seen) == len(set(seen)),
None,
'Duplicate episode numbers {0} vs {1}',
sorted(seen), sorted(set(seen)))
# Check that numbers are consecutive.
seen = sorted([int(s) for s in seen])
clean = True
for i in range(len(seen) - 1):
clean = clean and ((seen[i+1] - seen[i]) == 1)
reporter.check(clean,
None,
'Missing or non-consecutive episode numbers {0}',
seen)
def create_checker(args, filename, info):
"""Create appropriate checker for file."""
for (pat, cls) in CHECKERS:
if pat.search(filename):
return cls(args, filename, **info)
return NotImplemented
class CheckBase:
"""Base class for checking Markdown files."""
def __init__(self, args, filename, metadata, metadata_len, text, lines, doc):
"""Cache arguments for checking."""
self.args = args
self.reporter = self.args.reporter # for convenience
self.filename = filename
self.metadata = metadata
self.metadata_len = metadata_len
self.text = text
self.lines = lines
self.doc = doc
self.layout = None
def check(self):
"""Run tests."""
self.check_metadata()
self.check_line_lengths()
self.check_trailing_whitespace()
self.check_blockquote_classes()
self.check_codeblock_classes()
self.check_defined_link_references()
def check_metadata(self):
"""Check the YAML metadata."""
self.reporter.check(self.metadata is not None,
self.filename,
'Missing metadata entirely')
if self.metadata and (self.layout is not None):
self.reporter.check_field(
self.filename, 'metadata', self.metadata, 'layout', self.layout)
def check_line_lengths(self):
"""Check the raw text of the lesson body."""
if self.args.line_lengths:
over = [i for (i, l, n) in self.lines if (
n > MAX_LINE_LEN) and (not l.startswith('!'))]
self.reporter.check(not over,
self.filename,
'Line(s) too long: {0}',
', '.join([str(i) for i in over]))
def check_trailing_whitespace(self):
"""Check for whitespace at the ends of lines."""
if self.args.trailing_whitespace:
trailing = [
i for (i, l, n) in self.lines if P_TRAILING_WHITESPACE.match(l)]
self.reporter.check(not trailing,
self.filename,
'Line(s) end with whitespace: {0}',
', '.join([str(i) for i in trailing]))
def check_blockquote_classes(self):
"""Check that all blockquotes have known classes."""
for node in self.find_all(self.doc, {'type': 'blockquote'}):
cls = self.get_val(node, 'attr', 'class')
self.reporter.check(cls in KNOWN_BLOCKQUOTES,
(self.filename, self.get_loc(node)),
'Unknown or missing blockquote type {0}',
cls)
def check_codeblock_classes(self):
"""Check that all code blocks have known classes."""
for node in self.find_all(self.doc, {'type': 'codeblock'}):
cls = self.get_val(node, 'attr', 'class')
self.reporter.check(cls in KNOWN_CODEBLOCKS,
(self.filename, self.get_loc(node)),
'Unknown or missing code block type {0}',
cls)
def check_defined_link_references(self):
"""Check that defined links resolve in the file.
Internally-defined links match the pattern [text][label].
"""
result = set()
for node in self.find_all(self.doc, {'type': 'text'}):
for match in P_INTERNAL_LINK_REF.findall(node['value']):
text = match[0]
link = match[1]
if link not in self.args.references:
result.add('"{0}"=>"{1}"'.format(text, link))
self.reporter.check(not result,
self.filename,
'Internally-defined links may be missing definitions: {0}',
', '.join(sorted(result)))
def find_all(self, node, pattern, accum=None):
"""Find all matches for a pattern."""
assert isinstance(pattern, dict), 'Patterns must be dictionaries'
if accum is None:
accum = []
if self.match(node, pattern):
accum.append(node)
for child in node.get('children', []):
self.find_all(child, pattern, accum)
return accum
def match(self, node, pattern):
"""Does this node match the given pattern?"""
for key in pattern:
if key not in node:
return False
val = pattern[key]
if isinstance(val, str):
if node[key] != val:
return False
elif isinstance(val, dict):
if not self.match(node[key], val):
return False
return True
@staticmethod
def get_val(node, *chain):
"""Get value one or more levels down."""
curr = node
for selector in chain:
curr = curr.get(selector, None)
if curr is None:
break
return curr
def get_loc(self, node):
"""Convenience method to get node's line number."""
result = self.get_val(node, 'options', 'location')
if self.metadata_len is not None:
result += self.metadata_len
return result
class CheckNonJekyll(CheckBase):
"""Check a file that isn't translated by Jekyll."""
def check_metadata(self):
self.reporter.check(self.metadata is None,
self.filename,
'Unexpected metadata')
class CheckIndex(CheckBase):
"""Check the main index page."""
def __init__(self, args, filename, metadata, metadata_len, text, lines, doc):
super().__init__(args, filename, metadata, metadata_len, text, lines, doc)
self.layout = 'lesson'
def check_metadata(self):
super().check_metadata()
self.reporter.check(self.metadata.get('root', '') == '.',
self.filename,
'Root not set to "."')
class CheckEpisode(CheckBase):
"""Check an episode page."""
def check(self):
"""Run extra tests."""
super().check()
self.check_reference_inclusion()
def check_metadata(self):
super().check_metadata()
if self.metadata:
if 'layout' in self.metadata:
if self.metadata['layout'] == 'break':
self.check_metadata_fields(BREAK_METADATA_FIELDS)
else:
self.reporter.add(self.filename,
'Unknown episode layout "{0}"',
self.metadata['layout'])
else:
self.check_metadata_fields(TEACHING_METADATA_FIELDS)
def check_metadata_fields(self, expected):
"""Check metadata fields."""
for (name, type_) in expected:
if name not in self.metadata:
self.reporter.add(self.filename,
'Missing metadata field {0}',
name)
elif not isinstance(self.metadata[name], type_):
self.reporter.add(self.filename,
'"{0}" has wrong type in metadata ({1} instead of {2})',
name, type(self.metadata[name]), type_)
def check_reference_inclusion(self):
"""Check that links file has been included."""
if not self.args.reference_path:
return
for (i, last_line, line_len) in reversed(self.lines):
if last_line:
break
require(last_line,
'No non-empty lines in {0}'.format(self.filename))
include_filename = os.path.split(self.args.reference_path)[-1]
if include_filename not in last_line:
self.reporter.add(self.filename,
'episode does not include "{0}"',
include_filename)
class CheckReference(CheckBase):
"""Check the reference page."""
def __init__(self, args, filename, metadata, metadata_len, text, lines, doc):
super().__init__(args, filename, metadata, metadata_len, text, lines, doc)
self.layout = 'reference'
class CheckGeneric(CheckBase):
"""Check a generic page."""
def __init__(self, args, filename, metadata, metadata_len, text, lines, doc):
super().__init__(args, filename, metadata, metadata_len, text, lines, doc)
CHECKERS = [
(re.compile(r'CONTRIBUTING\.md'), CheckNonJekyll),
(re.compile(r'README\.md'), CheckNonJekyll),
(re.compile(r'index\.md'), CheckIndex),
(re.compile(r'reference\.md'), CheckReference),
(re.compile(r'_episodes/.*\.md'), CheckEpisode),
(re.compile(r'.*\.md'), CheckGeneric)
]
if __name__ == '__main__':
main()
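# Hedged standalone sketch of the recursive dictionary matching used by
# CheckBase.match/find_all above: a node matches a pattern when every pattern key is
# present and its value matches (by equality for strings, recursively for dicts).
# The example node below is illustrative only.
def _match_sketch(node, pattern):
    for key, val in pattern.items():
        if key not in node:
            return False
        if isinstance(val, str) and node[key] != val:
            return False
        if isinstance(val, dict) and not _match_sketch(node[key], val):
            return False
    return True

assert _match_sketch({'type': 'codeblock', 'attr': {'class': 'language-python'}},
                     {'type': 'codeblock', 'attr': {'class': 'language-python'}})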
|
# Imports
import torch
from labml_nn.transformers.switch import SwitchTransformer, SwitchTransformerLayer, SwitchFeedForward
from labml_nn.transformers import MultiHeadAttention
from labml_nn.transformers.feed_forward import FeedForward
import numpy as np
from transformers import AutoConfig, AutoModel
import torch.nn as nn
import math
from torch.utils.data import Dataset, DataLoader
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import mean_squared_error
from random import choice
from sklearn.decomposition import PCA
from copy import deepcopy
from transformers import BertModel, BertConfig
# Custom dataset class to store Open Subtitles data
class CustomDataset(torch.utils.data.Dataset):
'Characterizes a dataset for PyTorch'
def __init__(self, input_ids, token_type_ids, attention_masks):
'Initialization'
self.input_ids = input_ids
self.token_type_ids = token_type_ids
self.attention_masks = attention_masks
def __len__(self):
'Denotes the total number of samples'
return len(self.input_ids)
def __getitem__(self, index):
'Generates one sample of data'
input_id = self.input_ids[index]
token_type_ID = self.token_type_ids[index]
attention_mask = self.attention_masks[index]
sample = {'input_ids':input_id, 'token_type_ids':token_type_ID , 'attention_mask':attention_mask}
return sample
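# Hedged usage sketch for CustomDataset: wrap pre-tokenized tensors and iterate with a
# DataLoader. The toy tensors below are illustrative; in the real pipeline they come from
# the LaBSE tokenizer applied to Open Subtitles text.
if __name__ == "__main__":
    _ids = torch.zeros((8, 16), dtype=torch.long)
    _toy_dataset = CustomDataset(_ids, torch.zeros_like(_ids), torch.ones_like(_ids))
    _loader = DataLoader(_toy_dataset, batch_size=4)
    print(next(iter(_loader))['input_ids'].shape)  # torch.Size([4, 16])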
# weights_init and switch_init initialise the weights for the model as described in the Switch Transformer paper
def weights_init(tensor: torch.Tensor):
if isinstance(tensor, nn.Linear):
switch_init(tensor.weight.data)
torch.nn.init.zeros_(tensor.bias.data)
if isinstance(tensor, nn.LayerNorm):
torch.nn.init.zeros_(tensor.weight.data)
torch.nn.init.zeros_(tensor.bias.data)
def switch_init(tensor: torch.Tensor, s: float = 0.1, mean: float=0) -> torch.Tensor:
fan_in, fan_out = torch.nn.init._calculate_fan_in_and_fan_out(tensor)
std = math.sqrt(s/fan_in)
return torch.nn.init.trunc_normal_(tensor=tensor, mean=mean, std=std)
class LaBSE_Switch(nn.Module):
"""
Torch module to create a Switch Transformer for LaBSE.
Can be used for other BERT based models too, just change the input_id
tokenization and word_embedding module.
Inputs:
config = dictionary of configuration
word_embeddings_module = torch module mapping token ids to word embeddings
Forward:
Input_ids = ids using labse tokenizer
attention_mask = binary, indicates to model which tokens should be attended to,
and which should not.
Outputs:
outputs = a dictionary containing x, counts, route_prob, n_dropped, logits, attention, values
See Switch Transformer paper to understand all except:
attention, values and logits, which are used during knowledge distillation.
"""
def __init__(self, config, word_embeddings_module):
super().__init__()
# set the switch transformer as the actual neural net
self.switch_model = SwitchTransformer(
SwitchTransformerLayer(
d_model=config['d_model'],
attn=MultiHeadAttention(config['heads'], config['d_model'], config['dropout']),
feed_forward=SwitchFeedForward(
capacity_factor=config['capacity_factor'],
drop_tokens=config['drop_tokens'],
is_scale_prob=config['is_scale_prob'],
n_experts=config['n_experts'],
expert=FeedForward(config['d_model'], config['d_ff'], config['dropout_ffn']),
d_model=config['d_model']),
dropout_prob=config['dropout']),
config['n_layers'],
d_out = int(768),
dropout_prob = config['dropout'])
# initialise weights
# self.switch_model.apply(weights_init)
# module that maps input tokens into embedding vectors
self.word_embeddings = word_embeddings_module
# get attention weights from teacher
# self.weight_init_from_teacher(teacher_model=teacher_model, int_matches=int_matches)
def weight_init_from_teacher(self, teacher_model, int_matches):
"""
Initialises the attention modules of the student with those of the teacher -- specific to LaBSE and DistilSwitch.
int_matches should be a list of tuples of [(teacher_layer, student_layer),...]
e.g. int_matches = [(5,0),(11,2)] --> give attention weights of teacher layer 5 to student layer 0
"""
# teacher_model=load_teacher(device=torch.device('cuda'))
self.switch_model.layers[int_matches[1]].attn.query.linear.weight = teacher_model.encoder.layer[int_matches[0]].attention.self.query.weight
self.switch_model.layers[int_matches[1]].attn.query.linear.bias = teacher_model.encoder.layer[int_matches[0]].attention.self.query.bias
self.switch_model.layers[int_matches[1]].attn.key.linear.weight = teacher_model.encoder.layer[int_matches[0]].attention.self.key.weight
self.switch_model.layers[int_matches[1]].attn.key.linear.bias = teacher_model.encoder.layer[int_matches[0]].attention.self.key.bias
self.switch_model.layers[int_matches[1]].attn.value.linear.weight = teacher_model.encoder.layer[int_matches[0]].attention.self.value.weight
self.switch_model.layers[int_matches[1]].attn.value.linear.bias = teacher_model.encoder.layer[int_matches[0]].attention.self.value.bias
self.switch_model.layers[int_matches[1]].attn.output.weight = teacher_model.encoder.layer[int_matches[0]].attention.output.dense.weight
self.switch_model.layers[int_matches[1]].attn.output.bias = teacher_model.encoder.layer[int_matches[0]].attention.output.dense.bias
# self.switch_model.layers[int_matches[1]].norm_ff.weight = teacher_model.encoder.layer[int_matches[0]].output.LayerNorm.weight
# self.switch_model.layers[int_matches[1]].norm_ff.bias = teacher_model.encoder.layer[int_matches[0]].output.LayerNorm.bias
def forward(self, input_ids, token_type_ids=None, attention_mask=None):
# masks and token type ids not used, as we're just creating sentence embeddings for classification tasks
# word embeddings of shape [batch, seq_len, d_model]
input_embeddings = self.word_embeddings(input_ids)
# model input on shape [seq_len, batch, d_model] and mask
_batch,_seq_len,_n_hid = input_embeddings.shape
#print(_n_hid)
# call switch transformer
outputs = self.switch_model(torch.reshape(input_embeddings, (_seq_len, _batch, _n_hid)),
attention_mask=None)
return outputs
# function to blackbox load the student for distillation - can be switch or bert based
def load_student(name, student_config, device, teacher_model, int_matches, N_LAYERS):
if name!='switch':
# for pretrained bert models - setup config
student_config = BertConfig.from_pretrained(name)
student_config.num_hidden_layers = N_LAYERS
student_config.output_hidden_states = True
student_config.output_attentions = True
student_config.use_cache = True
student_config.is_decoder = True
# load model and set input embeddings
student_model = BertModel.from_pretrained(name, config=student_config)
student_model.set_input_embeddings(teacher_model.get_input_embeddings())
student_model = student_model.float()
student_model.to(device=device)
return student_model
if name=='switch':
# create compressed word embeddings from those of the teacher
word_embeddings = deepcopy(teacher_model.get_input_embeddings())
compressed_word_embeddings = word_embedding_compression(word_embeddings, student_config['d_model'])
# create student model
student_model = LaBSE_Switch(config=student_config, word_embeddings_module=compressed_word_embeddings)
# initialise weights
student_model.switch_model.apply(weights_init)
student_model.weight_init_from_teacher(teacher_model=teacher_model, int_matches=int_matches)
# convert model to float32 and move to device
student_model = student_model.float()
student_model.to(device=device)
return student_model
# loads teacher model from Huggingface
def load_teacher(device):
teacher_config = AutoConfig.from_pretrained('sentence-transformers/LaBSE')
teacher_config.output_hidden_states = True
teacher_config.output_attentions = True
teacher_config.use_cache = True
teacher_config.is_decoder = True
teacher_model = AutoModel.from_pretrained('sentence-transformers/LaBSE', config=teacher_config)
teacher_model.float() # needs to be 32 bit precision to get decent results from distillation
teacher_model.to(device=device)
return teacher_model
# Adaptor for BERT based models
def simple_adaptor(batch, model_outputs):
# values need to be reformatted from Huggingface 'past_key_values' output
values = []
for i in model_outputs['past_key_values']:
values.append(i[1])
values = torch.stack(values)
attentions = []
for j in model_outputs['attentions']:
attentions.append(inv_softmax(j))
attentions = torch.stack(attentions)
# we use pooler output as logits
return {'logits': model_outputs['pooler_output'],
'hidden': model_outputs['hidden_states'],
#'attention': model_outputs['attentions'],
'attention':attentions,
'inputs_mask': batch['attention_mask'],
'value_relation': values,
'pooler_output':model_outputs['pooler_output']}
def inv_softmax(x,C=-50):
# reverses softmax operation - used in teacher_adaptor
# C variable sets the min value of the scores, -50 works well.
result = torch.log(x)
result = torch.where(result <= float('-inf'), torch.full_like(result,C), result)
return result
def teacher_adaptor(batch, model_outputs):
# selects relevant model and batch outputs used for distillation loss calculation
values = []
for i in model_outputs['past_key_values']:
values.append(i[1])
values = torch.stack(values)
attentions = []
for j in model_outputs['attentions']:
attentions.append(inv_softmax(j))
attentions = torch.stack(attentions)
# print(model_outputs['pooler_output'].requires_grad)
return {#'logits': model_outputs['last_hidden_state'],
'logits':model_outputs['pooler_output'],
'hidden': model_outputs['hidden_states'],
#'attention': model_outputs['attentions'],
'attention': attentions,
'inputs_mask': batch['attention_mask'],
'value_relation': values,
'pooler_output':model_outputs['pooler_output']}
# adaptor for switch model
def switch_student_adaptor(batch, model_outputs):
# selects relevant model and batch outputs and reformats them
# needs to have same shapes as teacher adaptor
# reformat attention
layers, seq_len, _, batch_size, heads = model_outputs['attention'].shape
attention = model_outputs['attention'].reshape(layers, batch_size, heads, seq_len, seq_len)
# reformat logits
seq_len, batch_size, d_model = model_outputs['logits'].shape
logits = model_outputs['logits'].reshape(batch_size, seq_len, d_model)
# print(model_outputs['pooler_output'].requires_grad)
# reformat values
layers, seq_len, batch_size, heads, embedding_per_head = model_outputs['values'].shape
values = model_outputs['values'].reshape(layers, batch_size, heads, seq_len, embedding_per_head)
return {#'logits': logits,
'logits':model_outputs['pooler_output'],
'counts': model_outputs['counts'],
'attention': attention,
'inputs_mask': batch['attention_mask'],
'route_prob': model_outputs['route_prob'],
'n_dropped': model_outputs['n_dropped'],
'value_relation': values}
# Predict function evaluates model every epoch to show training progress
def predict(model, teacher_model, eval_dataset, step, device, STUDENT, BATCH_SIZE, eval_metric='cosine_similarity', feedback=True):
'''
model = student_model
teacher_model = labse
eval_dataset = dev set samples (Dataset) to test the model on per callback
device = cuda or cpu
student = switch or !switch
eval_metric = metric to evaluate the model - mse or cosine_similarity
'''
model.eval()
student_logits = []
teacher_logits =[]
batch_counts = []
batch_n_dropped = []
batch_route_prob = []
dataloader = DataLoader(eval_dataset,batch_size=BATCH_SIZE)
print('Running callback function on {} dev set samples...'.format(len(eval_dataset)))
for batch in dataloader:
input_ids = batch['input_ids'].to(device)
attention_mask = batch['attention_mask'].to(device)
with torch.no_grad():
model_outputs = model(input_ids=input_ids, attention_mask=attention_mask)
logits_S = model_outputs['pooler_output']
logits_T = teacher_model(input_ids=input_ids, attention_mask=attention_mask)['pooler_output']
cpu_logits_S = logits_S.detach().cpu()
cpu_logits_T = logits_T.detach().cpu()
if STUDENT=='switch' and feedback==True:
counts = model_outputs['counts'].detach().cpu()
n_dropped = model_outputs['n_dropped']
route_prob = model_outputs['route_prob'].detach().cpu()
for i in range(len(cpu_logits_S)):
student_logits.append(cpu_logits_S[i].numpy())
teacher_logits.append(cpu_logits_T[i].numpy())
if STUDENT=='switch' and feedback==True:
for i in range(len(counts)):
batch_counts.append(counts[i].numpy())
batch_n_dropped.append(n_dropped[i])
batch_route_prob.append(route_prob[i].numpy())
model.train()
student_logits = np.array(student_logits)
teacher_logits = np.array(teacher_logits)
if eval_metric=='cosine_similarity':
similarities = np.diag(cosine_similarity(student_logits, teacher_logits))
print ("Average cosine similarity for these samples: ", np.mean(similarities))
if eval_metric=='mse':
mse_error = mean_squared_error(student_logits, teacher_logits)
print ("Average mean squared error for these samples: ", mse_error)
if STUDENT=='switch' and feedback==True:
switch_counts = np.array(batch_counts)
switch_n_dropped = np.array(batch_n_dropped)
switch_route_prob = np.array(batch_route_prob)
print('SWITCH BEHAVIOUR:')
print('Counts Shape: \n', switch_counts.shape)
print('Counts: \n', switch_counts)
print('N_dropped: \n', switch_n_dropped)
print('Route Prob: \n', switch_route_prob)
if eval_metric=='mse':
    return torch.Tensor([mse_error])
return torch.Tensor([np.mean(similarities)])
# generates random parameters for hyperparam tuning
def generate_random_params(params):
# input: params dictionary containing lists of possible values
chosen_params = {}
for param in params:
chosen_params[param] = choice(params[param])
return chosen_params
def word_embedding_compression(word_embedding_module, d_model):
"""
Compresses a given word_embedding_module (type torch.Embedding) into a module of d_model dimensionality.
"""
word_embedding_matrix = word_embedding_module.weight
assert word_embedding_matrix.shape[1]>=d_model, 'The desired word embedding dimensionality is greater than the teacher word embeddings. That is not compression! Make d_model smaller.'
# return the module if it's the same dimensionality
if word_embedding_matrix.shape[1]==d_model:
return word_embedding_module
# else compress
pca = PCA(n_components = d_model)
compressed_word_embedding_matrix = pca.fit_transform(word_embedding_matrix.detach().cpu().numpy())
compressed_word_embedding_matrix = torch.from_numpy(compressed_word_embedding_matrix)
word_embedding_module.weight = torch.nn.parameter.Parameter(compressed_word_embedding_matrix)
return word_embedding_module
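# Hedged sanity sketch for word_embedding_compression: a toy nn.Embedding is compressed
# from 64 to 16 dimensions via PCA. Sizes are illustrative; PCA requires
# n_components <= min(vocab_size, original_dim).
if __name__ == "__main__":
    _toy_emb = nn.Embedding(1000, 64)
    _compressed = word_embedding_compression(deepcopy(_toy_emb), d_model=16)
    print(_compressed.weight.shape)  # torch.Size([1000, 16])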
|
import cv2
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-p2', "--path", help="path to input video")
parser.add_argument('-p1', "--spath", help="path where the images should be stored")
parser.add_argument('-n', "--num", help="number from which image label naming starts", type=int)
args = parser.parse_args()
num = args.num
cap = cv2.VideoCapture(args.path)
count = 0
path = args.spath
print(args.num, args.path, args.spath)
ret = True
while ret:
    ret, frame = cap.read()
    if not ret:
        break
    count += 1
    if count % 10 == 0:
        cv2.imwrite(path + str(num) + '.jpg', frame)
print(path+str(num)+'.jpg')
num += 1
|
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
class Justifier:
def __init__(self, **kw):
super().__init__(**kw)
self.justs = [0] * 9
self.offsets = [(0, 0, 0, 1, 1, 1, 1, 1, 1),
(0, -1, -2, 0, 0, 0, 1, 1, 1),
(0, -1, -2, 0, -1, -2, 0, 0, 0)]
def init_justs(self, justs):
for i in justs:
i = i // 3
os = self.offsets[i]
if os:
self.justs = [sum(x) for x in zip(self.justs, os)]
self.offsets[i] = None
def calc_just(self, justs):
for i in justs:
i = self.justs[i] + (i % 3)
if i == 1:
return 'justify-content-center'
elif i > 1:
return 'justify-content-end'
return 'justify-content-start'
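# Hedged walk-through of Justifier: init_justs consumes one offset row per group-of-three
# index, and calc_just then maps the adjusted index onto a Bootstrap justify-content-* class.
# The indices below are illustrative.
if __name__ == "__main__":
    j = Justifier()
    j.init_justs([4])        # 4 // 3 == 1 -> apply (and consume) the second offset row
    print(j.calc_just([4]))  # justs[4] + 4 % 3 == 1 -> 'justify-content-center'
    print(j.calc_just([3]))  # justs[3] + 3 % 3 == 0 -> 'justify-content-start'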
|
"""A proto buffer based logging system for minitaur experiments.
The logging system records the time since reset, base position, orientation,
angular velocity and motor information (joint angle, speed, and torque) into a
proto buffer. See minitaur_logging.proto for more details. The episode_proto is
updated per time step by the environment and saved onto disk for each episode.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
import time
import tensorflow as tf
import minitaur_logging_pb2
NUM_MOTORS = 8
def _update_base_state(base_state, values):
base_state.x = values[0]
base_state.y = values[1]
base_state.z = values[2]
def preallocate_episode_proto(episode_proto, max_num_steps):
"""Preallocate the memory for proto buffer.
Dynamically allocating memory as the protobuf expands causes unexpected delay
that is not tolerable with locomotion control.
Args:
episode_proto: The proto that holds the state/action data for the current
episode.
max_num_steps: The max number of steps that will be recorded in the proto.
The state/data over max_num_steps will not be stored in the proto.
"""
for _ in range(max_num_steps):
step_log = episode_proto.state_action.add()
step_log.info_valid = False
step_log.time.seconds = 0
step_log.time.nanos = 0
for _ in range(NUM_MOTORS):
motor_state = step_log.motor_states.add()
motor_state.angle = 0
motor_state.velocity = 0
motor_state.torque = 0
motor_state.action = 0
_update_base_state(step_log.base_position, [0, 0, 0])
_update_base_state(step_log.base_orientation, [0, 0, 0])
_update_base_state(step_log.base_angular_vel, [0, 0, 0])
def update_episode_proto(episode_proto, minitaur, action, step):
"""Update the episode proto by appending the states/action of the minitaur.
Note that the state/data over max_num_steps preallocated
(len(episode_proto.state_action)) will not be stored in the proto.
Args:
episode_proto: The proto that holds the state/action data for the current
episode.
minitaur: The minitaur instance. See envs.minitaur for details.
action: The action applied at this time step. The action is an 8-element
numpy floating-point array.
step: The current step index.
"""
max_num_steps = len(episode_proto.state_action)
if step >= max_num_steps:
tf.logging.warning(
"{}th step is not recorded in the logging since only {} steps were "
"pre-allocated.".format(step, max_num_steps))
return
step_log = episode_proto.state_action[step]
step_log.info_valid = minitaur.IsObservationValid()
time_in_seconds = minitaur.GetTimeSinceReset()
step_log.time.seconds = int(time_in_seconds)
step_log.time.nanos = int((time_in_seconds - int(time_in_seconds)) * 1e9)
motor_angles = minitaur.GetMotorAngles()
motor_velocities = minitaur.GetMotorVelocities()
motor_torques = minitaur.GetMotorTorques()
for i in range(minitaur.num_motors):
step_log.motor_states[i].angle = motor_angles[i]
step_log.motor_states[i].velocity = motor_velocities[i]
step_log.motor_states[i].torque = motor_torques[i]
step_log.motor_states[i].action = action[i]
_update_base_state(step_log.base_position, minitaur.GetBasePosition())
_update_base_state(step_log.base_orientation, minitaur.GetBaseRollPitchYaw())
_update_base_state(step_log.base_angular_vel,
minitaur.GetBaseRollPitchYawRate())
class MinitaurLogging(object):
"""A logging system that records the states/action of the minitaur."""
def __init__(self, log_path=None):
self._log_path = log_path
# TODO(jietan): Consider using recordio to write the logs.
def save_episode(self, episode_proto):
"""Save episode_proto to self._log_path.
self._log_path is the directory name. A time stamp is the file name of the
log file. For example, when self._log_path is "/tmp/logs/", the actual
log file would be "/tmp/logs/yyyy-mm-dd-hh:mm:ss".
Args:
episode_proto: The proto that holds the states/action for the current
episode that needs to be save to disk.
Returns:
The full log path, including the directory name and the file name.
"""
if not self._log_path or not episode_proto.state_action:
return self._log_path
if not tf.gfile.Exists(self._log_path):
tf.gfile.MakeDirs(self._log_path)
ts = time.time()
time_stamp = datetime.datetime.fromtimestamp(ts).strftime(
"%Y-%m-%d-%H:%M:%S")
log_path = os.path.join(self._log_path,
"minitaur_log_{}".format(time_stamp))
with tf.gfile.Open(log_path, "w") as f:
f.write(episode_proto.SerializeToString())
return log_path
def restore_episode(self, log_path):
"""Restore the episodic proto from the log path.
Args:
log_path: The full path of the log file.
Returns:
The minitaur episode proto.
"""
with tf.gfile.Open(log_path) as f:
content = f.read()
episode_proto = minitaur_logging_pb2.MinitaurEpisode()
episode_proto.ParseFromString(content)
return episode_proto
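if __name__ == "__main__":
  # Usage sketch (not part of the original module): round-trip an episode proto
  # through MinitaurLogging. It relies on the module-level imports above
  # (minitaur_logging_pb2, tf.gfile, time, datetime, os); the log directory below
  # is a placeholder.
  demo_proto = minitaur_logging_pb2.MinitaurEpisode()
  demo_proto.state_action.add()  # one empty step so save_episode() writes a file
  logger = MinitaurLogging(log_path="/tmp/minitaur_logs")
  saved_path = logger.save_episode(demo_proto)
  restored = logger.restore_episode(saved_path)
  print("Restored episode with {} steps from {}".format(
      len(restored.state_action), saved_path))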
|
# -*- coding: utf-8 -*-
from unittest import TestCase
import six
from popolo_data.importer import Popolo
EXAMPLE_AREA = {
"id": "area/tartu_linn",
"identifiers": [
{
"identifier": "Q3032626",
"scheme": "wikidata"
}
],
"name": "Tartu linn",
"other_names": [
{
"lang": "fr",
"name": "Dixième circonscription législative d'Estonie",
"note": "multilingual"
},
{
"lang": "et",
"name": "Valimisringkond nr 10",
"note": "multilingual"
},
{
"lang": "en",
"name": "Electoral District 10 (Tartu)",
"note": "multilingual"
}
],
"type": "constituency"
}
class TestAreas(TestCase):
def test_empty_file_gives_no_areas(self):
popolo = Popolo({})
assert len(popolo.areas) == 0
def test_single_area_with_name(self):
popolo = Popolo({"areas": [EXAMPLE_AREA]})
assert len(popolo.areas) == 1
area = popolo.areas[0]
assert area.name == 'Tartu linn'
def test_area_id(self):
popolo = Popolo({"areas": [EXAMPLE_AREA]})
area = popolo.areas[0]
assert area.id == 'area/tartu_linn'
def test_area_type(self):
popolo = Popolo({"areas": [EXAMPLE_AREA]})
area = popolo.areas[0]
assert area.type == 'constituency'
def test_area_identifiers(self):
popolo = Popolo({"areas": [EXAMPLE_AREA]})
area = popolo.areas[0]
assert area.identifiers == [
{
"identifier": "Q3032626",
"scheme": "wikidata"
}
]
def test_area_other_names(self):
popolo = Popolo({"areas": [EXAMPLE_AREA]})
area = popolo.areas[0]
assert area.other_names == [
{
"lang": "fr",
"name": "Dixième circonscription législative d'Estonie",
"note": "multilingual"
},
{
"lang": "et",
"name": "Valimisringkond nr 10",
"note": "multilingual"
},
{
"lang": "en",
"name": "Electoral District 10 (Tartu)",
"note": "multilingual"
}
]
def test_area_wikidata(self):
popolo = Popolo({"areas": [EXAMPLE_AREA]})
area = popolo.areas[0]
assert area.wikidata == 'Q3032626'
def test_area_repr(self):
popolo = Popolo({"areas": [EXAMPLE_AREA]})
area = popolo.areas[0]
if six.PY2:
assert repr(area) == b"<Area: Tartu linn>"
else:
assert repr(area) == u"<Area: Tartu linn>"
def test_area_identity_equality_and_inequality(self):
popolo_a = Popolo({"areas": [EXAMPLE_AREA]})
area_a = popolo_a.areas[0]
popolo_b = Popolo({"areas": [EXAMPLE_AREA]})
area_b = popolo_b.areas[0]
assert area_a == area_b
assert not (area_a != area_b)
|
__author__ = 'alvertisjo'
from django.core.serializers import json
import requests
from requests.packages.urllib3 import Timeout
from requests.packages.urllib3.exceptions import ConnectionError
class OpenProductData(object):
def getData(self):
# rowStep=100
# currentPage=0
# #####documentation: http://pod.opendatasoft.com/api/doc/#doc-datasets-search
# full_url='http://pod.opendatasoft.com/api/records/1.0/search' #http://pod.opendatasoft.com/api/records/1.0/search?dataset=pod_gtin&rows=10&start=11&facet=gpc_s_nm&facet=brand_nm&facet=owner_nm&facet=gln_nm&facet=prefix_nm
# dataset='pod_gtin'
# #print(full_url)
# try:
# response = requests.get(full_url, verify=False)
# #print response
# return response.json()
# except ConnectionError as e: # This is the correct syntax
# print "error: %s" %e
# return response.json()
# except Timeout as t: # This is the correct syntax
# print "Timeout error: %s" %t
# return json.dumps({"error":t.message})
# except:
# return json.dumps([])
pass
def readDataFromFile(self):
pass
def storeToGraph(self,data):
# POST http://localhost:7474/db/data/transaction/commit
# Accept: application/json; charset=UTF-8
# Content-Type: application/json
url= 'http://snf-561492.vm.okeanos.grnet.gr:7474/'
# {
# "statements" : [ {
# "statement" : "CREATE (n) RETURN id(n)"
# } ]
# }
pass
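# Sketch (added for illustration, not part of the original class): a working
# Python 3 version of the request flow outlined in the commented-out notes in
# getData() above. The endpoint and dataset name ('pod_gtin') come from those
# notes; error handling uses requests' own exception type rather than the
# urllib3 classes imported above, since requests wraps transport errors.
def fetch_pod_gtin_page(rows=100, start=0):
    full_url = 'http://pod.opendatasoft.com/api/records/1.0/search'
    params = {'dataset': 'pod_gtin', 'rows': rows, 'start': start}
    try:
        response = requests.get(full_url, params=params, verify=False)
        return response.json()
    except requests.exceptions.RequestException as e:
        print("request failed: %s" % e)
        return []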
|
#!/usr/bin/python
import argparse
import subprocess
import sys
from aslc.Semantic import compileString
from aslc.GLSLBackend import GLSLBackend
Backend = GLSLBackend
# Parse the command line
argParser = argparse.ArgumentParser(description="AbstractGPU Shading Language Compiler")
argParser.add_argument('inputFiles', metavar='input file', type=str, nargs='+')
argParser.add_argument('-I', dest='includeDirectories', metavar='include directory', type=str, nargs='+', action='append')
argParser.add_argument('-D', dest='macroDefinitions', metavar='macro definitions', type=str, nargs='+', action='append')
args = argParser.parse_args()
includeDirectories = args.includeDirectories
macroDefinitions = args.macroDefinitions
inputFiles = args.inputFiles
outputPath = '.'
# Run CPP to preprocess
def preprocessInput(inputFile):
with open(inputFile, 'r') as f:
        return ('# 1 "%s"\n' % inputFile) + f.read()
    # NOTE: the early return above bypasses the cpp invocation below, so the rest
    # of this function is currently unreachable.
    args = ['cpp', '-o', '-', '-nostdinc', '-E']
if includeDirectories is not None:
for include in includeDirectories:
args.append('-I')
args.append(include[0])
if macroDefinitions is not None:
for macro in macroDefinitions:
args.append('-D')
args.append(macro[0])
args.append(inputFile)
# Invoke the C preprocessor
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
sys.exit(proc.returncode)
return stdout
def processInputFile(inputFile):
preprocessed = preprocessInput(inputFile)
compiled = compileString(preprocessed)
return compiled
# Process the input files
for inputFile in inputFiles:
module = processInputFile(inputFile)
if module is None:
sys.exit(-1)
backend = Backend(module)
backend.generate(outputPath)
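# Example invocation (sketch; the file name, include path and macro below are
# placeholders):
#
#   python aslc.py -I shaders/include -D DEBUG_SHADING main.asl
#
# Each input file is read (preprocessInput currently returns the raw file
# contents before reaching its cpp invocation), compiled with compileString(),
# and emitted through the GLSL backend into the current directory.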
|
from django.views.generic import TemplateView
class HomePageView(TemplateView):
template_name = "home.html"
|
from . import Simulations
from . import Spacecraft
|
"""
@package: jsonutils
@script: test_JsonUtils.py
@purpose: Test Suite for JsonUtils.
@created: Aug 26, 2017
@author: <B>H</B>ugo <B>S</B>aporetti <B>J</B>unior
@mailto: yorevs@hotmail.com
@site: https://github.com/yorevs/homesetup
@license: Please refer to <https://opensource.org/licenses/MIT>
"""
import os
import unittest
from hhslib.security import *
PASSPHRASE = '12345'
SAMPLE_IN_FILE_NAME = "resources/secret.in"
SAMPLE_OUT_FILE_NAME = "resources/secret.out"
OUT_FILE = "resources/outfile.out"
OUT_FILE_GPG = "resources/outfile.out.gpg"
ORIGINAL_FILE_CONTENTS = "HomeSetup Secrets"
ENCODED_FILE_CONTENTS = "SG9tZVNldHVwIFNlY3JldHM="
class TestHhsLib(unittest.TestCase):
# Setup tests
def setUp(self):
with open(SAMPLE_IN_FILE_NAME, 'w') as f_in:
f_in.write(ORIGINAL_FILE_CONTENTS)
with open(SAMPLE_OUT_FILE_NAME, 'w') as f_in:
f_in.write(ENCODED_FILE_CONTENTS)
# Teardown tests
def tearDown(self):
if os.path.exists(OUT_FILE):
os.remove(OUT_FILE)
if os.path.exists(OUT_FILE_GPG):
os.remove(OUT_FILE_GPG)
# TEST CASES ----------
# TC1 - Test encoding a file.
def test_should_encode_file(self):
with open(SAMPLE_IN_FILE_NAME, 'r') as f_in:
contents = str(f_in.read().strip())
self.assertEquals(ORIGINAL_FILE_CONTENTS, contents)
encode(SAMPLE_IN_FILE_NAME, OUT_FILE)
with open(OUT_FILE, 'r') as f_out:
contents = str(f_out.read().strip())
self.assertEquals(ENCODED_FILE_CONTENTS, contents)
# TC2 - Test decoding a file.
def test_should_decode_file(self):
with open(SAMPLE_OUT_FILE_NAME, 'r') as f_in:
contents = str(f_in.read().strip())
self.assertEquals(ENCODED_FILE_CONTENTS, contents)
decode(SAMPLE_OUT_FILE_NAME, OUT_FILE)
with open(OUT_FILE, 'r') as f_out:
contents = str(f_out.read().strip())
self.assertEquals(ORIGINAL_FILE_CONTENTS, contents)
# TC3 - Test encrypting a file.
def test_should_encrypt_decrypt_file(self):
with open(SAMPLE_IN_FILE_NAME, 'r') as f_in:
contents = str(f_in.read().strip())
self.assertEquals(ORIGINAL_FILE_CONTENTS, contents)
encrypt(SAMPLE_IN_FILE_NAME, OUT_FILE_GPG, PASSPHRASE)
decrypt(OUT_FILE_GPG, OUT_FILE, PASSPHRASE)
with open(OUT_FILE, 'r') as f_out:
contents = str(f_out.read().strip())
self.assertEquals(ORIGINAL_FILE_CONTENTS, contents)
# Program entry point.
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestHhsLib)
unittest.TextTestRunner(verbosity=2).run(suite)
|
import threading
import requests
# encryption
import Encrypt
import unicodedata
from ttk import Style, Button, Label, Entry, Progressbar, Checkbutton
from Tkinter import Tk, Frame, RIGHT, BOTH, RAISED
from Tkinter import TOP, X, N, LEFT
from Tkinter import END, Listbox, MULTIPLE
from Tkinter import Toplevel, DISABLED
from Tkinter import StringVar, Scrollbar
from multiprocessing import Queue
from random import choice, randint
from fbchat import log, client
from fbchat.graphql import *
# Wrapper for the client class just in case we need to modify client to make it work
class GuiClient(client.Client):
def __init__(self, email, password, user_agent=None, max_tries=5, session_cookies=None, logging_level=logging.INFO):
"""
Initializes and logs in the client
:param email: Facebook `email`, `id` or `phone number`
:param password: Facebook account password
:param user_agent: Custom user agent to use when sending requests. If `None`, user agent will be chosen from a premade list (see :any:`utils.USER_AGENTS`)
:param max_tries: Maximum number of times to try logging in
:param session_cookies: Cookies from a previous session (Will default to login if these are invalid)
:param logging_level: Configures the `logging level <https://docs.python.org/3/library/logging.html#logging-levels>`_. Defaults to `INFO`
:type max_tries: int
:type session_cookies: dict
:type logging_level: int
:raises: FBchatException on failed login
"""
self.sticky, self.pool = (None, None)
self._session = requests.session()
self.req_counter = 1
self.seq = "0"
self.payloadDefault = {}
self.client = 'mercury'
self.default_thread_id = None
self.default_thread_type = None
self.req_url = ReqUrl()
self.most_recent_message = None
self.most_recent_messages_queue = Queue()
if not user_agent:
user_agent = choice(USER_AGENTS)
self._header = {
'Content-Type': 'application/x-www-form-urlencoded',
'Referer': self.req_url.BASE,
'Origin': self.req_url.BASE,
'User-Agent': user_agent,
'Connection': 'keep-alive',
}
handler.setLevel(logging_level)
# If session cookies aren't set, not properly loaded or gives us an invalid session, then do the login
if not session_cookies or not self.setSession(session_cookies) or not self.isLoggedIn():
self.login(email, password, max_tries)
else:
self.email = email
self.password = password
def onMessage(self, author_id, message_object, thread_id, thread_type, **kwargs):
self.markAsDelivered(author_id, thread_id)
self.markAsRead(author_id)
if (message_object is not None):
self.most_recent_message = message_object
self.most_recent_messages_queue.put(message_object)
def stopListening(self):
"""Cleans up the variables from startListening"""
print("Logging off...")
self.listening = False
self.sticky, self.pool = (None, None)
def listen(self, markAlive=True):
"""
Initializes and runs the listening loop continually
:param markAlive: Whether this should ping the Facebook server each time the loop runs
:type markAlive: bool
"""
self.startListening()
self.onListening()
while self.listening and self.doOneListen(markAlive):
pass
self.stopListening()
class GUI(Frame):
"""
This is the root window
"""
def __init__(self, parent, client):
self.queue = Queue()
# I got sick of filling in the login parameters repeatedly,
# for the sake of testing I will leave it like this and clear it before finishing the gui
self.email = ""
self.password = ""
self.name = ""
self.parent = parent
self.initialized = False
self.loadWindow = None
self.remember = False
self.client = None
self.msg_list = None
self.changingConvo = False
self.loginScreen()
def centerWindow(self, notself=None):
"""
This centers the window into place
if notself is set, then it centers
the notself window
@param:
notself - TKobject
"""
        if notself is not None:  # notself is primarily for the progressbar
sw = self.parent.winfo_screenwidth()
sh = self.parent.winfo_screenheight()
x = (sw - self.w / 2) / 2
y = (sh - self.h / 2) / 2
notself.geometry('%dx%d+%d+%d' % (self.w / 1.8, self.h / 1.8, x, y))
else:
sw = self.parent.winfo_screenwidth()
sh = self.parent.winfo_screenheight()
x = (sw - self.w) / 2
y = (sh - self.h) / 2
self.parent.geometry('%dx%d+%d+%d' % (self.w, self.h, x, y))
def startWindow(self):
"""
This method starts/creates the window for
the UI
"""
Frame.__init__(self, self.parent, background="white")
self.style = Style()
self.style.theme_use("default")
self.pack(fill=BOTH, expand=1)
if (not self.initialized):
self.centerWindow()
else:
self.parent.geometry('%dx%d' % (self.w, self.h))
self.initialized = True
def resetWindow(self):
"""
Resets the window
"""
if (self.initialized):
self.destroy()
if (self.loadWindow is not None):
self.loadWindow.destroy()
self.startWindow()
def loginScreen(self):
"""
First screen that user will see, will require Facebook credentials to be inputted
"""
# Resetting window
self.h = 150
self.w = 350
self.resetWindow()
self.parent.title("Welcome")
# Creating frame that takes in email
emailFrame = Frame(self)
emailFrame.pack(fill=X, side=TOP)
emailLabel = Label(emailFrame, text="Email:", background="white")
emailLabel.pack(side=LEFT, padx=15, pady=10)
self.emailEntry = Entry(emailFrame, width=30)
self.emailEntry.insert(0, self.email)
self.emailEntry.pack(side=LEFT, padx=35, pady=10)
# Done with email frame
# Creating password frame
passwordFrame = Frame(self)
passwordFrame.pack(fill=X, side=TOP)
passwordLabel = Label(passwordFrame, text="Password:", background="white")
passwordLabel.pack(side=LEFT, padx=15, pady=10)
self.passwordEntry = Entry(passwordFrame, show="*", width=30)
self.passwordEntry.bind("<Return>", self.start)
self.passwordEntry.insert(0, self.password)
self.passwordEntry.pack(side=LEFT, padx=35, pady=10)
# Done with password frame
# Creating bottom buttons
frame = Frame(self, borderwidth=1)
frame.pack(fill=BOTH, expand=True)
self.pack(fill=BOTH, expand=True)
exitButton = Button(self, text="Exit", command=self.parent.destroy)
exitButton.pack(side=RIGHT, padx=5, pady=5)
self.loginButton = Button(self, text="Log In", command=self.start)
self.loginButton.pack(side=RIGHT)
# Done with bottom buttons
def start(self, opt=""):
"""
Initiates login, starts loading screen.
"""
thread1 = ThreadedTask(self.queue, self.login)
thread2 = ThreadedTask(self.queue, self.loadingScreen)
thread2.start()
thread1.start()
self.checkThread(thread1, self.chatUI)
def loadingScreen(self):
"""
This starts the loading screen
and disables all buttons
"""
for i in self.winfo_children():
if Button == type(i):
i.configure(state=DISABLED)
self.loadWindow = Toplevel(self.parent)
loadingstring = "Logging in..."
loadinglabel = Label(self.loadWindow, text=loadingstring, background="white")
progressbar = Progressbar(self.loadWindow, orient="horizontal",
length=300, mode="indeterminate")
progressbar.pack(pady=self.h / 10)
loadinglabel.pack()
self.centerWindow(self.loadWindow)
self.loadWindow.title("Wait")
progressbar.start()
def login(self):
"""
Login with the inputted credentials from the loginScreen
"""
if(self.client is not None):
if(self.client.isLoggedIn()):
self.client.logout()
self.email = self.emailEntry.get()
self.password = self.passwordEntry.get()
# This will log into Facebook with the given credentials
self.client = GuiClient(self.email, self.password)
print(self.client._fetchInfo(self.client.uid)[self.client.uid].get('first_name'))
self.thread3 = ThreadedTask(self.queue, self.listen)
self.thread3.start()
def listen(self):
"""
We start the listening loop
"""
self.client.listen()
def chatUI(self):
"""
Chat GUI page
"""
self.h = 350
self.w = 700
self.resetWindow()
self.parent.title("Messenger")
# We make the chat side of the UI
self.right_frame = Frame(self)
self.right_frame.pack(side=RIGHT, fill='y')
self.messages_frame = Frame(self.right_frame)
self.messages_frame.pack(side=TOP)
self.my_msg = StringVar() # For messages to be sent.
self.my_msg.set("")
self.msg_scrollbar = Scrollbar(self.messages_frame) # Navigate through past messages
# Following will contain the messages
self.msg_list = Listbox(self.messages_frame, height=15, width=50, yscrollcommand=self.msg_scrollbar.set)
self.msg_scrollbar.config(command=self.msg_list.yview)
self.msg_scrollbar.pack(side=RIGHT, fill='y', padx=5)
self.msg_list.pack(side=RIGHT)
self.entry_field = Entry(self.right_frame, textvariable=self.my_msg)
self.entry_field.bind("<Return>", self.send)
self.send_button = Button(self.right_frame, text="Send", command=self.send)
self.entry_field.pack(side="top", fill=X, padx=5, pady=5)
self.send_button.pack(side="top")
self.exitButton = Button(self.right_frame, text="Exit", command=self.exit)
self.exitButton.pack(side="bottom", padx=5, pady=5)
        # We make the side that contains the other users.
self.left_frame = Frame(self)
self.left_frame.pack(side=LEFT, fill='y')
self.usr_scrollbar = Scrollbar(self.left_frame)
self.usr_list = Listbox(self.left_frame, height=15, width=50, yscrollcommand=self.usr_scrollbar.set)
self.usr_scrollbar.config(command=self.usr_list.yview)
self.usr_search_bar = Entry(self.left_frame, textvariable="")
self.usr_search_button = Button(self.left_frame, text="Search", command=self.search)
self.usr_search_bar.pack(side="top", fill=X, pady=2, padx=1)
self.usr_search_button.pack(side="top", fill=X, pady=2, padx=1)
self.usr_scrollbar.pack(side=RIGHT, fill='y', padx=5)
self.usr_list.pack(side=RIGHT, fill='y')
# The user loading logic is in the search function
self.search()
self.usr_list.bind('<Double-1>', self.changeConvo)
def search(self):
fresh_users = self.client.fetchAllUsers()
self.users = []
        if (self.usr_search_bar.get() != ""):
for user in fresh_users:
if (self.usr_search_bar.get() in user.name):
self.users.append(user)
else:
self.users = fresh_users
        if (self.usr_list.size() != 0):
self.usr_list.delete(0, END)
for user in self.users:
self.usr_list.insert(END, " " + user.name)
# By default I would just take the first conversation
self.currentUser = self.users[0] # TODO: fix IndexOutOfRange Error when searched for a string not found
self.usr_search_bar.delete(0, END)
def send(self, _=""):
"""
Send messages, will send whatever is in the message field and then clear it
"""
plaintext = self.entry_field.get()
key = randint(-60, 60)
ciphertext = Encrypt.encrypt(plaintext, key)
ciphertext = "{}Q_Q{}".format(key, ciphertext)
message = Message(text=unicode(ciphertext, "ascii"))
self.client.send(message, self.currentUser.uid)
self.entry_field.delete(0, END)
self.client.most_recent_message = message
self.msg_list.insert(0, self.name + ": " + plaintext)
self.msg_list.see(END)
def changeConvo(self, param):
"""
When you click on another user in the chat we update the page
"""
print("CHANGING CONVO")
selectionIndex = self.usr_list.curselection()
self.currentUser = self.users[selectionIndex[0]]
self.changingConvo = True
self.updateConversation()
def updateConversation(self):
"""
        Clear the conversation box, repopulate it with the current conversation,
        and check the Facebook server for new messages
"""
if (self.changingConvo): # we are changing the conversation/switching users
print("[updateConversation] we are changing conversation")
messages = self.client.fetchThreadMessages(self.currentUser.uid)
self.msg_list.delete(0, END)
for message in messages:
text = self.decrypt_w_uc(message)
self.msg_list.insert(0, self.client._fetchInfo(message.author)[message.author][
"first_name"] + ": " + text)
# The message listbox will automatically look at the last/"most recent" message
self.msg_list.see(END)
# We no longer need to change the conversation
self.changingConvo = False
else: # same user, but checking for new messages
# Sees last message from the message list box
last_message = self.msg_list.get(END)
if (self.client is not None and self.client.isLoggedIn() and self.client.most_recent_message is not None):
msg_object = self.client.most_recent_message
msg_author = self.client.most_recent_message.author
name = ""
if (msg_author is None):
msg_author = self.name
else:
name = self.client._fetchInfo(msg_author)[msg_author]["first_name"]
text = self.decrypt_w_uc(msg_object)
new_last_message = name + ": " + text
if (last_message != new_last_message):
# This is checking if were updating the current convo or refreshing convo
if (name + ": " in last_message):
while (self.client.most_recent_messages_queue.empty() is not True):
message = self.client.most_recent_messages_queue.get()
text = self.decrypt_w_uc(message)
self.msg_list.insert(END, self.client._fetchInfo(message.author)[message.author][
"first_name"] + ": " + text)
self.msg_list.see(END)
else:
messages = self.client.fetchThreadMessages(self.currentUser.uid)
self.msg_list.delete(0, END)
for message in messages:
text = self.decrypt_w_uc(message)
self.msg_list.insert(0, self.client._fetchInfo(message.author)[message.author][
"first_name"] + ": " + text)
self.msg_list.see(END)
self.client.most_recent_message = messages[0]
def decrypt_w_uc(self, message):
"""
Decrypt with unicode character check - will decrypt when necessary,
and then convert unicode to ascii so TCL won't freak out
Input: message -> fbchat.models.Message, Message object
Output: clean_text -> String
"""
clean_text = ""
if "Q_Q" in message.text: # to be decrypted
key, ciphertext = message.text.split("Q_Q")
clean_text = Encrypt.decrypt(ciphertext, int(key))
else:
clean_text = message.text
# now we do unicode and emoji
clean_clean_text = ""
for character in clean_text:
# if character not in emoji.UNICODE_EMOJI:
if type(character) is unicode:
clean_clean_text += unicodedata.normalize('NFKD', character).encode('ascii', 'replace')
else:
clean_clean_text += character
return clean_clean_text
def exit(self):
"""
Stops listening and ends GUI
"""
self.client.stopListening()
self.parent.destroy()
def checkThread(self, thread, function):
"""
This function checks to see if
the given thread is dead, if it
is not, it recalls a new checkThread.
After the thread is dead, it calls the
given function
@param:
thread - ThreadedTask
            function - a function
"""
if thread.is_alive():
self.parent.after(1000, lambda: self.checkThread(thread, function))
else:
function()
class ThreadedTask(threading.Thread):
"""
Used for creating a threaded task
"""
def __init__(self, queue, function):
"""
Starts the threaded task
@param:
queue - Queue object
function - a function
"""
threading.Thread.__init__(self)
self.queue = queue
self.function = function
def run(self):
"""
Runs the function
"""
self.function()
def tk_loop(root, ex):
"""
    Checks for new messages every two seconds
"""
if (ex.msg_list is not None):
ex.updateConversation()
root.after(2000, tk_loop, root, ex)
def initiate_tk_loop(root, ex):
"""
I honestly don't know how to thread this other than doing this terrible piece of code
"""
root.after(2000, tk_loop, root, ex)
def removeEmoji(msg):
    """
    removes non-ASCII chars
    :param msg:
    :return: new_msg with emoji/non-ASCII chars removed
    """
    new_msg = ""
    for ch in msg:
        if ord(ch) < 128:  # keep plain ASCII characters only
            new_msg += ch
    return new_msg
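# Added example (sketch, not used by the GUI): shows the "<key>Q_Q<ciphertext>"
# framing that send() writes and decrypt_w_uc() parses. Relies on the local
# Encrypt module and randint imported above; the sample plaintext is arbitrary.
def demo_frame_roundtrip(plaintext="hello"):
    key = randint(-60, 60)
    framed = "{}Q_Q{}".format(key, Encrypt.encrypt(plaintext, key))
    recovered_key, ciphertext = framed.split("Q_Q")
    return Encrypt.decrypt(ciphertext, int(recovered_key)) == plaintext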
if __name__ == "__main__":
# create GUI
root = Tk()
root.resizable(width=False, height=False)
ex = GUI(root, client)
    # make calls to the API to load the GUI with relevant information
initiate_tk_loop(root, ex)
root.mainloop()
|
from pipdeptree import get_installed_distributions, build_dist_index, construct_tree
from bs4 import BeautifulSoup
from json import dump, load
from urllib.request import urlretrieve
from pathlib import Path
from unittest import mock
from pkginfo import SDist
from johnnydep.cli import JohnnyDist
import requests
import setuptools
import tarfile
def read_packages():
with open("python_packages_list.json", "r") as f:
package_info = load(f)
return package_info
def download(download_link, output_folder, package_name, version):
url = download_link
dst = Path(output_folder).joinpath("{}_{}.tar.gz".format(package_name, version))
urlretrieve(url, dst)
def get_packages(package_name):
"""
Note that package name already starts with /simple/
This downloader only focuses on .tar.gz files
:param package_name:
:return:
"""
url = "https://pypi.org{}".format(package_name)
r = requests.get(url)
soup = BeautifulSoup(r.content, features='html.parser')
tar = 0
for id, link in enumerate(soup.find_all('a', href=True)):
if ".tar.gz" in link["href"]:
download(link["href"], "downloaded_packages/tar", package_name.split("/")[-2], id)
tar += 1
return {"tar": tar}
def extract_info_from_setup():
with mock.patch.object(setuptools, 'setup') as mock_setup:
import data_collector.downloaded_packages.setup
args, kwargs = mock_setup.call_args
print(kwargs)
def unpack(package_name):
with tarfile.open(package_name, mode="r:gz") as tf:
tf.extractall()
def parse(pkg_info):
    mypackage = SDist(pkg_info)
    # NOTE: PackageInfo is not defined or imported in this module; it is assumed
    # to be provided elsewhere in the project.
    return PackageInfo(version=mypackage.version, author=mypackage.author_email,
license=mypackage.license,
name=mypackage.name,
maintainer=mypackage.maintainer_email, additional_details=mypackage.__dict__)
def get_dependencies(package):
url = 'https://pypi.org/pypi/{}/json'
json = requests.get(url.format(package)).json()
print(json.keys())
# return json
def get_johnny_dep(package):
dist = JohnnyDist(package, index_url=None, env=None, extra_index_url=None)
return dist.serialise(fields=["name", "requires", "required_by", "project_name", "versions_available"], format=None, recurse=True)
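# Added sketch: pulls the declared dependencies out of the same public PyPI JSON
# API response that get_dependencies() above fetches ("info" -> "requires_dist").
def get_requires_dist(package):
    data = requests.get('https://pypi.org/pypi/{}/json'.format(package)).json()
    return data.get("info", {}).get("requires_dist") or []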
if __name__ == '__main__':
# get_package_list()
# get_packages("/simple/jupyter/")
# unpack("downloaded_packages/tar/jupyter_1.tar.gz")
# extract_info_from_setup()
# print(parse("downloaded_packages/tar/jupyter_1.tar.gz").dump_details())
# print(pkg_resources.get_distribution("downloaded_packages/tar/jupyter_1.tar.gz"))
print(get_dependencies("pandas"))
# print(get_johnny_dep("ipython"))
|
#!/usr/local/bin/python3.6
# Substitute average testing value from optimization
import sys
if len(sys.argv) != 2:
sys.stderr.write('Usage: python3.X %s sy_fraction\n' % sys.argv[0])
raise SystemExit(1)
sy_fraction = sys.argv[1]
parameters = []
with open('input_data/infection_parameters.txt','r') as fin:
parameters = fin.readlines()
for ind,param in enumerate(parameters):
if 'average fraction to get tested' in param:
parameters[ind+1] = str(sy_fraction) + '\n'
break
with open('input_data/infection_parameters.txt','w') as fout:
fout.writelines(parameters)
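# For reference (sketch of the assumed file layout): the script expects the
# parameter name on one line and its value on the next, e.g.
#
#   average fraction to get tested
#   0.45
#
# so parameters[ind+1] above overwrites the value line with sy_fraction.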
|
""" Simple functions to manipulate strings """
# Replace the functions below with your implementation as described in the assignment
def is_rhyme(word1, word2, k):
"""
Returns True if the last k letters of the two words are the same (case sensitive).
Automatically returns False if either word contains less than k letters.
"""
if (k == 0): # Cannot compare if k is 0
return False
if (len(word1) < k or len(word2) < k): # Return False if either word
return False # contains less than k letters
rev_word1 = word1[::-1] # Reverse word1
rev_word2 = word2[::-1] # Reverse word2
# Compare first k chars of reversed word1 and reversed word2
# Equivalent of comparing last k chars of word 1 and word2
return rev_word1[:k] == rev_word2[:k]
# Test Cases
# print(is_rhyme("hello", "world", 7)) # False
# print(is_rhyme("hello", "llo", 3)) # True
# print(is_rhyme("ello", "ello", 0)) # False
# print(is_rhyme("elo", "ello", 3)) # False
# print(is_rhyme("ello", "ello", 4)) # True
# print(is_rhyme("ello", "ello", 5)) # False
|
import sys
if len(sys.argv) != 2:
print("Usage: python3 orgmybmarks.py bookmarks.html")
quit()
file = open(sys.argv[1])
# file = open("in.html")
fileout = open("out.html", "w")
hreflist = []
# Read the links (HREF entries) from the file
numm = 1
while True:
line = file.readline()
if not line:
break
if line.find("HREF") == -1:
continue
num = line.find("HREF")
href = line[num + 6:]
num = href.find("\"")
href = href[:num]
hreflist.append(href)
print("%d now %s" % (numm, href))
numm += 1
numbef = len(hreflist)
hreflist = list(set(hreflist))  # deduplicate
numaft = len(hreflist)
fir = '''<!DOCTYPE NETSCAPE-Bookmark-file-1>
<!-- This is an automatically generated file.
It will be read and overwritten.
DO NOT EDIT! -->
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=UTF-8">
<TITLE>Bookmarks</TITLE>
<H1>Bookmarks</H1>
<DL><p>
<DT><H3 ADD_DATE="1530070116" LAST_MODIFIED="1532090715" PERSONAL_TOOLBAR_FOLDER="true">书签栏</H3>
<DL><p>
'''
fileout.write(fir)
for i in range(len(hreflist)):
sec = " <DT><A HREF=\"%s\">%d</A>\n" % (hreflist[i], i)
fileout.write(sec)
end = ''' </DL><p>
</DL><p>'''
fileout.write(end)
file.close()
fileout.close()
print("finished! now you have %d bookmarks, %d duplicated bookmarks deleted!" % (numaft, numbef - numaft))
|
import main
def test_execute():
count = main.results
assert count == 400410
|
import numpy as np
import os
import sys
import matplotlib.pyplot as plt
import chainconsumer
from math import ceil
# pyburst
from . import mcmc_versions
from . import mcmc_tools
from . import burstfit
from . import mcmc_params
from pyburst.observations import obs_tools
from pyburst.plotting import plot_tools
from pyburst.grids.grid_strings import get_source_path, print_warning
from pyburst.misc.pyprint import printv
GRIDS_PATH = os.environ['KEPLER_GRIDS']
def default_plt_options():
"""Initialise default plot parameters"""
params = {'mathtext.default': 'regular',
'font.family': 'serif',
'text.usetex': False}
plt.rcParams.update(params)
default_plt_options()
def save_plot(fig, prefix, save, source, version, display, chain=None, n_dimensions=None,
n_walkers=None, n_steps=None, label=None, extension='.png',
enforce_chain_info=True):
"""Handles saving/displaying of a figure passed to it
"""
if enforce_chain_info and (None in (n_dimensions, n_walkers, n_steps)):
if chain is None:
raise ValueError('Must provide chain, or specify each of '
'(n_dimensions, n_walkers, n_steps)')
else:
n_walkers, n_steps, n_dimensions = chain.shape
if save:
filename = mcmc_tools.get_mcmc_string(source=source, version=version,
n_walkers=n_walkers, n_steps=n_steps,
prefix=prefix, label=label,
extension=extension)
source_path = get_source_path(source)
filepath = os.path.join(source_path, 'plots', prefix, f'{filename}')
fig.savefig(filepath)
if display:
plt.show(block=False)
else:
plt.close(fig)
def save_multiple_synth(series, source, version, n_steps, discard, n_walkers=960,
walkers=True, posteriors=True, contours=False,
display=False, mass_radius=True,
synth=True, compressed=False):
"""Save plots for multiple series in a synthetic data batch
"""
# TODO reuse max_lhood point
default_plt_options()
for ser in series:
if synth:
full_source = f'{source}_{ser}'
else:
full_source = source
chain = mcmc_tools.load_chain(full_source, n_walkers=n_walkers, n_steps=n_steps,
version=version, compressed=compressed)
if walkers:
plot_walkers(chain, source=full_source, save=True,
display=display, version=version)
if posteriors:
plot_posteriors(chain, source=full_source, save=True, discard=discard,
display=display, version=version)
if contours:
plot_contours(chain, source=full_source, save=True, discard=discard,
display=display, version=version)
if mass_radius:
plot_mass_radius(chain, source=full_source, save=True, discard=discard,
display=display, version=version)
def save_all_plots(source, version, discard, n_steps, n_walkers=1000, display=False,
save=True, cap=None, posteriors=True, contours=True,
redshift=True, mass_radius=True, verbose=True, compressed=False):
"""Saves (and/or displays) main MCMC plots
"""
chain = mcmc_tools.load_chain(source, version=version, n_steps=n_steps,
n_walkers=n_walkers, verbose=verbose,
compressed=compressed)
if posteriors:
printv('Plotting posteriors', verbose=verbose)
plot_posteriors(chain, source=source, save=save, discard=discard, cap=cap,
display=display, version=version)
if contours:
printv('Plotting contours', verbose=verbose)
plot_contours(chain, source=source, save=save, discard=discard, cap=cap,
display=display, version=version)
if mass_radius:
printv('Plotting mass-radius', verbose=verbose)
plot_mass_radius(chain, source=source, save=save, discard=discard, cap=cap,
display=display, version=version)
if redshift:
printv('Plotting redshift', verbose=verbose)
plot_redshift(chain, source=source, save=save, discard=discard, cap=cap,
display=display, version=version)
def plot_contours(chain, discard, source, version, cap=None,
display=True, save=False, truth_values=None, parameters=None,
sigmas=np.linspace(0, 2, 5), cc=None, summary=False, fontsize=14,
max_ticks=4):
"""Plots posterior contours of mcmc chain
parameters : [str]
specify which parameters to plot
"""
default_plt_options()
if cc is None:
pkeys = mcmc_versions.get_parameter(source, version, 'param_keys')
pkey_labels = plot_tools.convert_mcmc_labels(param_keys=pkeys)
cc = mcmc_tools.setup_chainconsumer(chain=chain, param_labels=pkey_labels,
discard=discard, cap=cap, sigmas=sigmas,
summary=summary, fontsize=fontsize,
max_ticks=max_ticks)
if parameters is not None:
parameters = plot_tools.convert_mcmc_labels(param_keys=parameters)
# TODO: figsize
if truth_values is not None:
fig = cc.plotter.plot(truth=truth_values, parameters=parameters)
else:
fig = cc.plotter.plot(parameters=parameters)
save_plot(fig, prefix='contours', chain=chain, save=save, source=source,
version=version, display=display)
return fig
def plot_posteriors(chain, discard, source, version, cap=None,
display=True, save=False, truth_values=None,
cc=None):
"""Plots posterior distributions of mcmc chain
truth_values : list|dict
Specify parameters of point (e.g. the true value) to draw on the distributions.
"""
default_plt_options()
pkeys = mcmc_versions.get_parameter(source, version, 'param_keys')
pkey_labels = plot_tools.convert_mcmc_labels(param_keys=pkeys)
if cc is None:
cc = mcmc_tools.setup_chainconsumer(chain=chain, param_labels=pkey_labels,
discard=discard, cap=cap)
height = 3 * ceil(len(pkeys) / 4)
if truth_values is not None:
fig = cc.plotter.plot_distributions(figsize=[10, height],
truth=truth_values)
else:
fig = cc.plotter.plot_distributions(figsize=[10, height])
plt.tight_layout()
save_plot(fig, prefix='posteriors', chain=chain, save=save, source=source,
version=version, display=display)
return fig
def plot_mass_radius(chain, discard, source, version, cap=None,
display=True, save=False, summary=False,
sigmas=np.linspace(0, 2, 5), fontsize=18, figsize='column'):
"""Plots contours of mass versus radius from a given chain
"""
default_plt_options()
mass_nw, mass_gr = mcmc_params.get_constant_masses(source, version)
mass_radius_chain = mcmc_params.get_mass_radius_chain(chain=chain, discard=discard,
source=source, version=version,
cap=cap, mass_nw=mass_nw,
mass_gr=mass_gr)
cc = mcmc_tools.setup_custom_chainconsumer(mass_radius_chain, parameters=['R', 'M'],
sigmas=sigmas, summary=summary,
fontsize=fontsize)
fig = cc.plotter.plot(figsize=figsize)
fig.subplots_adjust(left=0.16, bottom=0.15)
save_plot(fig, prefix='mass-radius', chain=chain, save=save, source=source,
version=version, display=display)
return fig
def plot_redshift(chain, discard, source, version, cap=None, display=True, save=False):
"""Plots posterior distribution of redshift given a chain
"""
mass_nw, mass_gr = mcmc_params.get_constant_masses(source, version)
redshift_chain = mcmc_params.get_redshift_chain(chain=chain, discard=discard,
source=source, version=version,
cap=cap, mass_nw=mass_nw,
mass_gr=mass_gr)
cc = mcmc_tools.setup_custom_chainconsumer(redshift_chain, parameters=['1+z'])
fig = cc.plotter.plot_distributions(figsize=[5, 5])
plt.tight_layout()
save_plot(fig, prefix='redshift', chain=chain, save=save, source=source,
version=version, display=display)
return fig
def plot_gravitational_contours(chain, discard, source, version, cap=None, display=True,
save=False, r_nw=10, sigmas=np.linspace(0, 2, 5),
summary=False, unit_labels=True, fontsize=16,
fixed_grav=False, figsize=None):
"""Plots contours of gravitational parameters
"""
cc = mcmc_tools.setup_gravitational_chainconsumer(chain=chain, discard=discard,
source=source, version=version,
cap=cap, fixed_grav=fixed_grav,
summary=summary, r_nw=r_nw,
unit_labels=unit_labels,
sigmas=sigmas, fontsize=fontsize)
if fixed_grav:
fig = cc.plotter.plot_distributions(figsize=figsize)
plt.tight_layout()
else:
fig = cc.plotter.plot()
save_plot(fig, prefix='gravitational', chain=chain, save=save, source=source,
version=version, display=display)
return fig
def plot_inclination(chain, discard, source, version, cap=None, display=True,
save=False, disc_model='he16_a', sigmas=np.linspace(0, 2, 5),
summary=False, unit_labels=True, figsize=(4, 4), fontsize=18):
"""Plots contours of parameters derived using disc model
"""
disc_chain = mcmc_params.get_disc_chain(chain=chain, discard=discard, cap=cap,
source=source, version=version,
disc_model=disc_model)
cc = mcmc_tools.setup_custom_chainconsumer(disc_chain, parameters=['d', 'i'],
sigmas=sigmas, summary=summary,
unit_labels=unit_labels, fontsize=fontsize)
fig = cc.plotter.plot(figsize=figsize)
fig.subplots_adjust(left=0.15, bottom=0.15)
save_plot(fig, prefix='disc', chain=chain, save=save, source=source,
version=version, display=display)
return fig
def plot_distance_anisotropy(chain, discard, source, version, cap=None, display=True,
save=False, sigmas=np.linspace(0, 2, 5), summary=False,
figsize=(4, 4), unit_labels=True, fontsize=18):
"""Plots contours of MCMC parameters d_b, xi_ratio
"""
d_b_chain = mcmc_params.get_param_chain(chain, param='d_b', discard=discard,
source=source, version=version, cap=cap)
xi_ratio_chain = mcmc_params.get_param_chain(chain, param='xi_ratio', discard=discard,
source=source, version=version, cap=cap)
flat_chain = np.column_stack([d_b_chain, xi_ratio_chain])
cc = mcmc_tools.setup_custom_chainconsumer(flat_chain, parameters=['d_b', 'xi_ratio'],
sigmas=sigmas, summary=summary,
unit_labels=unit_labels, fontsize=fontsize)
fig = cc.plotter.plot(figsize=figsize)
fig.subplots_adjust(left=0.2, bottom=0.2)
save_plot(fig, prefix='distance', chain=chain, save=save, source=source,
version=version, display=display)
return fig
def plot_xedd(chain, discard, source, version, cap=None, display=True,
save=False, cloud=True, sigmas=np.linspace(0, 2, 10), figsize=(5, 5)):
"""Plots posterior for Eddington hydrogen composition (X_Edd)
"""
default_plt_options()
xedd_chain = mcmc_params.get_xedd_chain(chain=chain, discard=discard, source=source,
version=version, cap=cap)
label = plot_tools.quantity_label('xedd')
cc = mcmc_tools.setup_custom_chainconsumer(xedd_chain, parameters=[label],
sigmas=sigmas, cloud=cloud)
fig = cc.plotter.plot(figsize=figsize)
save_plot(fig, prefix='xedd', chain=chain, save=save, source=source,
version=version, display=display)
return fig
def plot_walkers(chain, source, version, params=None, n_lines=30, xlim=-1,
display=True, save=False, label=''):
"""Plots walkers vs steps (i.e. "time")
Parameters
----------
source : str
version : int
chain : np.array
chain as returned by load_chain()
params : [str]
parameter(s) of which to plot walkers.
n_lines : int
        approximate number of lines/walkers to plot per parameter
xlim : int
x-axis limit to plot (n_steps), i.e. ax.set_xlim((0, xlim))
label : str
optional label to add to filename when saving
display : bool
save : bool
"""
default_plt_options()
pkeys = mcmc_versions.get_parameter(source, version, 'param_keys')
# ===== Default to splitting all params into 2 plots =====
if params is None:
half = int(len(pkeys) / 2)
for i, param_split in enumerate((pkeys[:half], pkeys[half:])):
plot_walkers(chain=chain, source=source, version=version,
params=param_split, n_lines=n_lines, xlim=xlim,
display=display, save=save, label=f'P{i + 1}')
return
n_walkers, n_steps, n_dim = chain.shape
n_params = len(params)
jump_size = round(n_walkers / n_lines)
steps = np.arange(n_steps)
walker_idxs = np.arange(0, n_walkers, jump_size)
# noinspection PyTypeChecker
fig, ax = plt.subplots(n_params, 1, sharex=True, figsize=(10, 12))
for i in range(n_params):
p_idx = pkeys.index(params[i])
for j in walker_idxs:
walker = chain[j, :, p_idx]
ax[i].plot(steps, walker, linewidth=0.5, color='black')
ax[i].set_ylabel(params[i])
if xlim == -1:
xlim = n_steps
ax[-1].set_xlabel('Step')
ax[-1].set_xlim([0, xlim])
plt.tight_layout()
if display:
plt.show(block=False)
save_plot(fig, prefix='walkers', chain=chain, save=save, source=source,
version=version, display=display,
label=label, extension='.png')
def plot_qb_mdot(chain, source, version, discard, cap=None, display=True, save=False,
figsize=(5, 5), fontsize=16, sigmas=(1, 2)):
"""Plots 2D contours of Qb versus Mdot for each epoch (from multi-epoch chain)
"""
mv = mcmc_versions.McmcVersion(source=source, version=version)
chain_flat = mcmc_tools.slice_chain(chain, discard=discard, cap=cap, flatten=True)
system_table = obs_tools.load_summary(mv.system)
epochs = list(system_table.epoch)
cc = chainconsumer.ChainConsumer()
param_labels = []
for param in ['mdot', 'qb']:
param_labels += [plot_tools.full_label(param)]
for i, epoch in enumerate(epochs):
mdot_idx = mv.param_keys.index(f'mdot{i + 1}')
qb_idx = mv.param_keys.index(f'qb{i + 1}')
param_idxs = [mdot_idx, qb_idx]
cc.add_chain(chain_flat[:, param_idxs], parameters=param_labels,
name=str(epoch))
cc.configure(kde=False, smooth=0, label_font_size=fontsize,
tick_font_size=fontsize-2, sigmas=sigmas)
fig = cc.plotter.plot(display=False, figsize=figsize)
fig.subplots_adjust(left=0.2, bottom=0.2)
save_plot(fig, prefix='qb', save=save, source=source, version=version,
display=display, chain=chain)
return fig
def plot_epoch_posteriors(master_cc, source, version, display=True, save=False,
                          col_wrap=None, alt_params=True, unit_labels=True,
                          add_text=True, fontsize=16):
    """Plot posteriors for multiple epoch chains
parameters
----------
master_cc : ChainConsumer
Contains the multi-epoch chain, created with setup_master_chainconsumer()
source : str
version : int
display : bool (optional)
save : bool (optional)
col_wrap : int (optional)
"""
param_order = {
'grid5': ['mdot1', 'mdot2', 'mdot3', 'qb1', 'qb2', 'qb3', 'x', 'z', 'm_nw',
'm_gr', 'd_b', 'xi_ratio'],
'he2': ['mdot1', 'mdot2', 'qb1', 'qb2', 'm_gr', 'd_b', 'xi_ratio'],
}
param_keys = param_order[source]
# TODO: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
# quick and dirty patch!
if alt_params:
param_keys = ['mdot1', 'mdot2', 'mdot3', 'qb1', 'qb2', 'qb3', 'x', 'z', 'g',
'M', 'd_b', 'xi_ratio']
# TODO: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
formatted_params = plot_tools.convert_mcmc_labels(param_keys, unit_labels=unit_labels)
n_epochs = len(master_cc.chains) - 1
if col_wrap is None:
col_wrap = n_epochs
height = 3 * ceil(len(param_keys) / n_epochs)
fig = master_cc.plotter.plot_distributions(parameters=formatted_params,
col_wrap=col_wrap,
figsize=[8, height],
display=False)
if add_text:
add_epoch_text(fig, fontsize=fontsize)
plt.tight_layout()
save_plot(fig, prefix='multi_posteriors', save=save, source=source, version=version,
display=display, enforce_chain_info=False)
return fig
def plot_max_lhood(source, version, n_walkers, n_steps, verbose=True, re_interp=False,
display=True, save=False):
default_plt_options()
max_params, max_lhood = mcmc_tools.get_max_lhood_params(source, version=version,
n_walkers=n_walkers,
n_steps=n_steps,
verbose=verbose,
return_lhood=True)
bfit = burstfit.BurstFit(source=source, version=version, verbose=False, re_interp=re_interp)
lhood, fig = bfit.lhood(max_params, plot=True)
if lhood != max_lhood:
print_warning(f'lhoods do not match (original={max_lhood:.2f}, current={lhood:.2f}). '
+ 'BurstFit (e.g. lhood, lnhood) or interpolator may have changed')
save_plot(fig, prefix='compare', n_dimensions=len(max_params),
n_walkers=n_walkers, n_steps=n_steps, save=save, source=source,
version=version, display=display)
def plot_bprop_sample(bp_sample, source, version, bprops=None, legend=True,
subplot_figsize=(3, 2.5), bfit=None, fontsize=14,
vlines=True):
"""Plot burst properties from large sample against observations
bprop_sample : np.array
obtained using mcmc_tools.bprop_sample()
"""
if bfit is None:
bfit = burstfit.BurstFit(source=source, version=version, verbose=False)
if bprops is None:
bprops = bfit.mcmc_version.bprops
cc = mcmc_tools.setup_bprop_chainconsumer(chain=None, n=None, discard=None,
source=source, version=version,
bp_sample=bp_sample)
bp_summary = mcmc_tools.extract_bprop_summary(cc, source=source, version=version)
n_bprops = len(bprops)
n_rows = int(np.ceil(n_bprops / 2))
n_cols = {False: 1, True: 2}.get(n_bprops > 1)
figsize = (n_cols * subplot_figsize[0], n_rows * subplot_figsize[1])
fig, ax = plt.subplots(n_rows, n_cols, sharex=False, figsize=figsize)
if n_bprops % 2 == 1 and n_bprops > 1: # blank odd-numbered subplot
ax[-1, -1].axis('off')
for i, bprop in enumerate(bprops):
subplot_row = int(np.floor(i / 2))
subplot_col = i % 2
if n_cols > 1:
axis = ax[subplot_row, subplot_col]
else:
axis = ax
u_model = np.diff(bp_summary[:, :, i], axis=0)
bfit.plot_compare(model=bp_summary[1, :, i], u_model=u_model,
bprop=bprop, fontsize=fontsize,
ax=axis, display=False, vlines=vlines,
legend=True if (i == 0 and legend) else False,
xlabel=True if (i in [n_bprops-1, ]) else False)
fig.subplots_adjust(wspace=0.4)
plt.show(block=False)
return fig
def plot_autocorrelation(chain, source, version, n_points=10, load=True, save_tau=True,
ylims=None):
"""Plots estimated integrated autocorrelation time
Note: Adapted from https://dfm.io/posts/autocorr/
"""
mv = mcmc_versions.McmcVersion(source=source, version=version)
params_fmt = plot_tools.convert_mcmc_labels(mv.param_keys)
if load:
sample_steps, autoc = mcmc_tools.load_autocorrelation(source, version=version,
n_steps=chain.shape[1])
else:
sample_steps, autoc = mcmc_tools.get_autocorrelation(chain, source=source,
version=version,
n_points=n_points,
save=save_tau)
fig, ax = plt.subplots()
for i, param in enumerate(mv.param_keys):
ax.loglog(sample_steps, autoc[i], "o-", label=rf"{params_fmt[i]}")
ax.plot(sample_steps, sample_steps / 10.0, "--k", label=r"$\tau = N/10$")
if ylims is None:
xlim = ax.get_xlim()
ylims = [5, xlim[1] / 10]
ax.set_ylim(ylims)
ax.set_xlabel("N steps")
ax.set_ylabel(r"$\tau$ estimate (N)")
ax.legend(fontsize=14, ncol=2, labelspacing=0.3)
plt.show(block=False)
return fig
def add_epoch_text(fig, fontsize, epochs=(1998, 2000, 2007),
colours=('C0', 'C2', 'C3')):
"""Adds text of epoch to figure subplots
"""
for i, epoch in enumerate(epochs):
ax = fig.axes[i]
ax.text(0.95, 0.95, str(epoch), color=colours[i], fontsize=fontsize,
transform=ax.transAxes, va='top', ha='right')
|
import socket
import time
from xmlrpclib_to import ServerProxy
import httpretty
import pytest
XML_RESPONSE = """<?xml version="1.0"?>
<methodResponse>
<params>
<param>
<value><string>Test</string></value>
</param>
</params>
</methodResponse>"""
def timeout(request, url, headers):
time.sleep(1)
return 200, headers, XML_RESPONSE
@httpretty.activate
def test_timeout():
httpretty.register_uri(
httpretty.POST,
'http://example.com/RPC2',
content_type='text/xml',
body=timeout
)
proxy = ServerProxy('http://example.com', timeout=0.5)
with pytest.raises(socket.timeout):
proxy.test()
@httpretty.activate
def test_timeout_https():
httpretty.register_uri(
httpretty.POST,
'https://example.com/RPC2',
content_type='text/xml',
body=timeout
)
proxy = ServerProxy('https://example.com', timeout=0.5)
with pytest.raises(socket.timeout):
proxy.test()
if __name__ == "__main__":
test_timeout()
test_timeout_https()
|
from django.db.models.signals import m2m_changed
from django.dispatch import receiver
from .models import Image
@receiver(m2m_changed, sender=Image.users_like.through)
def users_like_changed(sender, instance, **kwargs):
instance.total_likes = instance.users_like.count()
instance.save()
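# How this receiver is typically exercised (sketch; `image` and `user` are
# placeholder objects):
#
#   image.users_like.add(user)   # fires m2m_changed on Image.users_like.through
#   image.refresh_from_db()
#   assert image.total_likes == image.users_like.count()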
|
{
"targets": [
{
"target_name": "binding",
"win_delay_load_hook": "true",
"conditions": [
["target_arch == 'x64' or target_arch == 'ia32'", {
"sources": [
"src/binding.cpp",
"src/BLAKE2/sse/blake2b.c",
"src/BLAKE2/sse/blake2bp.c",
"src/BLAKE2/sse/blake2s.c",
"src/BLAKE2/sse/blake2sp.c"
],
"include_dirs": [
"<!(node -e \"require('nan')\")",
"src/BLAKE2/sse"
]
}],
["target_arch == 'arm64'", {
"sources": [
"src/binding.cpp",
"src/BLAKE2/neon/blake2b-neon.c",
"src/BLAKE2/neon/blake2bp.c",
"src/BLAKE2/neon/blake2s-neon.c",
"src/BLAKE2/neon/blake2sp.c"
],
"include_dirs": [
"<!(node -e \"require('nan')\")",
"src/BLAKE2/neon"
]
}],
["target_arch != 'x64' and target_arch != 'ia32' and target_arch != 'arm64'", {
"sources": [
"src/binding.cpp",
"src/BLAKE2/ref/blake2b-ref.c",
"src/BLAKE2/ref/blake2bp-ref.c",
"src/BLAKE2/ref/blake2s-ref.c",
"src/BLAKE2/ref/blake2sp-ref.c"
],
"include_dirs": [
"<!(node -e \"require('nan')\")",
"src/BLAKE2/ref"
]
}]
],
"cflags_c": [
"-std=c99",
"-Wstrict-aliasing",
"-Wextra",
"-Wno-unused-function",
"-Wno-unused-const-variable"
],
"cflags_cc": [
"-Wstrict-aliasing",
"-Wextra",
"-Wno-unused-function",
"-Wno-unused-const-variable",
"-Wno-unused-parameter"
],
'xcode_settings': {
'OTHER_CFLAGS': [
"-Wstrict-aliasing",
"-Wextra",
"-Wno-unused-function",
"-Wno-unused-const-variable",
"-Wno-unused-parameter"
]
},
"msvs_settings": {
"VCCLCompilerTool": {
"AdditionalOptions": ["/arch:AVX"]
}
}
}
]
}
|
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
@pytest.fixture
def process_test_df():
"Base DataFrame"
return pd.DataFrame(
{"text": ["a_b_c", "c_d_e", np.nan, "f_g_h"], "numbers": range(1, 5)}
)
@pytest.fixture
def test_returns_dataframe():
"Base DataFrame"
return pd.DataFrame(
{"text": ["a1a2", "b1", "c1"], "numbers": [1, 2, 3]},
index=["A", "B", "C"],
)
def test_column_name_type(process_test_df):
"""Raise TypeError if `column_name` type is not `str`."""
with pytest.raises(TypeError):
process_test_df.process_text(["text"])
@pytest.mark.xfail(reason="new_column_names is deprecated.")
def test_new_column_names_type(process_test_df):
"""Raise TypeError if `new_column_names` type is not string or list."""
with pytest.raises(TypeError):
process_test_df.process_text(
column_name="text", new_column_names={"nutext": "rar"}
)
def test_column_name_presence(process_test_df):
"""Raise ValueError if `column_name` is not in dataframe."""
with pytest.raises(ValueError):
process_test_df.process_text(
column_name="Test", string_function="lower"
)
@pytest.mark.xfail(reason="new_column_names is deprecated.")
def test_new_column_names_presence_str(test_returns_dataframe):
"""
Raise ValueError if `new_column_names` is a str
and is in the dataframe.
"""
with pytest.raises(ValueError):
test_returns_dataframe.process_text(
column_name="text",
new_column_names="text",
string_function="extractall",
pat=r"([ab])?(\d)",
)
@pytest.mark.xfail(reason="new_column_names is deprecated.")
def test_new_column_names_presence_list(test_returns_dataframe):
"""
Raise ValueError if `new_column_names` is a list and at least
one of the new names is in the dataframe.
"""
with pytest.raises(ValueError):
test_returns_dataframe.process_text(
column_name="text",
new_column_names=["numbers", "newtext"],
string_function="extractall",
pat=r"([ab])?(\d)",
)
@pytest.mark.xfail(reason="merge_frame is deprecated.")
def test_merge_frame_type(test_returns_dataframe):
"""
Raise TypeError if `merge_frame` type is not bool."""
with pytest.raises(TypeError):
test_returns_dataframe.process_text(
column_name="text",
new_column_names=["number", "newtext"],
string_function="extractall",
pat=r"([ab])?(\d)",
merge_frame="True",
)
@pytest.mark.xfail(reason="string_function must be present.")
def test_string_function_is_None(process_test_df):
"""Test that dataframe is returned if string_function is None."""
result = process_test_df.process_text(column_name="text")
assert_frame_equal(result, process_test_df)
def test_str_split(process_test_df):
"""Test wrapper for Pandas `str.split()` method."""
expected = process_test_df.assign(
text=process_test_df["text"].str.split("_")
)
result = process_test_df.process_text(
column_name="text", string_function="split", pat="_"
)
assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="new_column_names is deprecated.")
def test_new_column_names(process_test_df):
"""
Test that a new column name is created when
`new_column_name` is not None.
"""
result = process_test_df.process_text(
column_name="text",
new_column_names="new_text",
string_function="slice",
start=2,
)
expected = process_test_df.assign(
new_text=process_test_df["text"].str.slice(start=2)
)
assert_frame_equal(result, expected)
@pytest.fixture
def no_nulls_df():
return pd.DataFrame({"text": ["a", "b", "c", "d"], "numbers": range(1, 5)})
def test_str_cat(no_nulls_df):
"""Test outcome for Pandas `.str.cat()` method."""
result = no_nulls_df.process_text(
column_name="text",
string_function="cat",
others=["A", "B", "C", "D"],
)
expected = no_nulls_df.assign(
text=no_nulls_df["text"].str.cat(others=["A", "B", "C", "D"])
)
assert_frame_equal(result, expected)
def test_str_cat_result_is_a_string(no_nulls_df):
"""
Test wrapper for Pandas `.str.cat()` method
when the outcome is a string.
"""
result = no_nulls_df.process_text(
column_name="text",
string_function="cat",
)
expected = no_nulls_df.assign(text=no_nulls_df["text"].str.cat())
assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="new_column_names is deprecated.")
def test_str_cat_result_is_a_string_and_new_column_names(no_nulls_df):
"""
Test wrapper for Pandas `.str.cat()` method when the outcome is a string,
and `new_column_names` is not None.
"""
result = no_nulls_df.process_text(
column_name="text", string_function="cat", new_column_names="combined"
)
expected = no_nulls_df.assign(combined=no_nulls_df["text"].str.cat())
assert_frame_equal(result, expected)
def test_str_get():
"""Test outcome for Pandas `.str.get()` method."""
df = pd.DataFrame(
{"text": ["aA", "bB", "cC", "dD"], "numbers": range(1, 5)}
)
expected = df.assign(text=df["text"].str.get(1))
    result = df.process_text(column_name="text", string_function="get", i=1)
assert_frame_equal(result, expected)
def test_str_lower():
"""Test string conversion to lowercase using `.str.lower()`."""
df = pd.DataFrame(
{
"codes": range(1, 7),
"names": [
"Graham Chapman",
"John Cleese",
"Terry Gilliam",
"Eric Idle",
"Terry Jones",
"Michael Palin",
],
}
)
expected = df.assign(names=df["names"].str.lower())
result = df.process_text(column_name="names", string_function="lower")
assert_frame_equal(result, expected)
def test_str_wrong(process_test_df):
"""Test that an invalid Pandas string method raises an exception."""
with pytest.raises(KeyError):
process_test_df.process_text(
column_name="text", string_function="invalid_function"
)
def test_str_wrong_parameters(process_test_df):
"""Test that invalid argument for Pandas string method raises an error."""
with pytest.raises(TypeError):
process_test_df.process_text(
column_name="text", string_function="split", pattern="_"
)
@pytest.fixture
def returns_frame_1():
return pd.DataFrame(
{
"ticker": [
"spx 5/25/2001 p500",
"spx 5/25/2001 p600",
"spx 5/25/2001 p700",
]
}
)
@pytest.mark.xfail(reason="merge_frame is deprecated.")
def test_return_dataframe_merge_is_None(returns_frame_1):
"""
Test that the dataframe returned when `merge_frame` is None
is the result of the text processing, and is not merged to
the original dataframe.
"""
expected_output = returns_frame_1["ticker"].str.split(" ", expand=True)
result = returns_frame_1.process_text(
column_name="ticker", string_function="split", expand=True, pat=" "
)
assert_frame_equal(result, expected_output)
@pytest.mark.xfail(reason="merge_frame is deprecated.")
def test_return_dataframe_merge_is_not_None(returns_frame_1):
"""
Test that the dataframe returned when `merge_frame` is not None
is a merger of the original dataframe, and the dataframe
generated from the text processing.
"""
expected_output = pd.concat(
[
returns_frame_1,
returns_frame_1["ticker"]
.str.split(" ", expand=True)
.add_prefix("new_"),
],
axis="columns",
)
result = returns_frame_1.process_text(
column_name="ticker",
new_column_names="new_",
merge_frame=True,
string_function="split",
expand=True,
pat=" ",
)
assert_frame_equal(result, expected_output)
@pytest.mark.xfail(reason="merge_frame is deprecated.")
def test_return_dataframe_merge_is_not_None_new_column_names_is_a_list(
returns_frame_1,
):
"""
Test that the dataframe returned when `merge_frame` is not None
is a merger of the original dataframe, and the dataframe
generated from the text processing. Also, the `new_column_names`
is a list.
"""
expected_output = pd.concat(
[
returns_frame_1,
returns_frame_1["ticker"]
.str.split(" ", expand=True)
.set_axis(["header1", "header2", "header3"], axis="columns"),
],
axis="columns",
)
result = returns_frame_1.process_text(
column_name="ticker",
new_column_names=["header1", "header2", "header3"],
merge_frame=True,
string_function="split",
expand=True,
pat=" ",
)
assert_frame_equal(result, expected_output)
@pytest.mark.xfail(reason="new_column_names is deprecated.")
def test_return_dataframe_new_column_names_is_a_list_len_unequal(
returns_frame_1,
):
"""
Raise error if text processing returns a dataframe,
`new_column_names` is not None, and the length of
`new_column_names` is not equal to the length of the
new dataframe's columns.
"""
with pytest.raises(ValueError):
returns_frame_1.process_text(
column_name="ticker",
new_column_names=["header1", "header2"],
merge_frame=True,
string_function="split",
expand=True,
pat=" ",
)
def test_output_extractall(test_returns_dataframe):
"""
Raise ValueError if the output is a dataframe.
"""
with pytest.raises(ValueError):
test_returns_dataframe.process_text(
column_name="text",
string_function="extractall",
pat=r"(?P<letter>[ab])?(?P<digit>\d)",
)
@pytest.mark.xfail(reason="merge_frame is deprecated.")
def test_output_extractall_merge_frame_is_not_None(test_returns_dataframe):
"""
Test output when `string_function` is "extractall"
and `merge_frame` is not None.
"""
expected_output = test_returns_dataframe["text"].str.extractall(
r"(?P<letter>[ab])?(?P<digit>\d)"
)
expected_output = test_returns_dataframe.join(
expected_output.reset_index("match"), how="outer"
).set_index("match", append=True)
result = test_returns_dataframe.process_text(
column_name="text",
merge_frame=True,
string_function="extractall",
pat=r"(?P<letter>[ab])?(?P<digit>\d)",
)
assert_frame_equal(result, expected_output)
|
from __future__ import division
from sympy import (Abs, Catalan, cos, Derivative, E, EulerGamma, exp,
factorial, factorial2, Function, GoldenRatio, I, Integer, Integral,
Interval, Lambda, Limit, log, Matrix, nan, O, oo, pi, Rational, Float, Rel,
S, sin, SparseMatrix, sqrt, summation, Sum, Symbol, symbols, Wild,
WildFunction, zeta, zoo, Dummy, Dict, Tuple, FiniteSet)
from sympy.core import Expr
from sympy.physics.units import second, joule
from sympy.polys import Poly, RootOf, RootSum, groebner
from sympy.statistics.distributions import Normal, Sample, Uniform
from sympy.geometry import Point, Circle
from sympy.utilities.pytest import raises
from sympy.printing import sstr, sstrrepr, StrPrinter
from sympy.core.trace import Tr
x, y, z, w = symbols('x,y,z,w')
d = Dummy('d')
def test_printmethod():
class R(Abs):
def _sympystr(self, printer):
return "foo(%s)" % printer._print(self.args[0])
assert sstr(R(x)) == "foo(x)"
class R(Abs):
def _sympystr(self, printer):
return "foo"
assert sstr(R(x)) == "foo"
def test_Abs():
assert str(Abs(x)) == "Abs(x)"
assert str(Abs(Rational(1,6))) == "1/6"
assert str(Abs(Rational(-1,6))) == "1/6"
def test_Add():
assert str(x+y) == "x + y"
assert str(x+1) == "x + 1"
assert str(x+x**2) == "x**2 + x"
assert str(5+x+y+x*y+x**2+y**2) == "x**2 + x*y + x + y**2 + y + 5"
assert str(1+x+x**2/2+x**3/3) == "x**3/3 + x**2/2 + x + 1"
assert str(2*x-7*x**2 + 2 + 3*y) == "-7*x**2 + 2*x + 3*y + 2"
assert str(x-y) == "x - y"
assert str(2-x) == "-x + 2"
assert str(x-2) == "x - 2"
assert str(x-y-z-w) == "-w + x - y - z"
assert str(x-z*y**2*z*w) == "-w*y**2*z**2 + x"
assert str(x-1*y*x*y) == "-x*y**2 + x"
assert str(sin(x).series(x, 0, 15)) == "x - x**3/6 + x**5/120 - x**7/5040 + x**9/362880 - x**11/39916800 + x**13/6227020800 + O(x**15)"
def test_Catalan():
assert str(Catalan) == "Catalan"
def test_ComplexInfinity():
assert str(zoo) == "zoo"
def test_Derivative():
assert str(Derivative(x, y)) == "Derivative(x, y)"
assert str(Derivative(x**2, x, evaluate=False)) == "Derivative(x**2, x)"
assert str(Derivative(x**2/y, x, y, evaluate=False)) == "Derivative(x**2/y, x, y)"
def test_dict():
assert str({1: 1+x}) == sstr({1: 1+x}) == "{1: x + 1}"
assert str({1: x**2, 2: y*x}) in ("{1: x**2, 2: x*y}", "{2: x*y, 1: x**2}")
assert sstr({1: x**2, 2: y*x}) == "{1: x**2, 2: x*y}"
def test_Dict():
assert str(Dict({1: 1+x})) == sstr({1: 1+x}) == "{1: x + 1}"
assert str(Dict({1: x**2, 2: y*x})) in (
"{1: x**2, 2: x*y}", "{2: x*y, 1: x**2}")
assert sstr(Dict({1: x**2, 2: y*x})) == "{1: x**2, 2: x*y}"
def test_Dummy():
assert str(d) == "_d"
assert str(d+x) == "_d + x"
def test_EulerGamma():
assert str(EulerGamma) == "EulerGamma"
def test_Exp():
assert str(E) == "E"
def test_factorial():
n = Symbol('n', integer=True)
assert str(factorial(-2)) == "0"
assert str(factorial(0)) == "1"
assert str(factorial(7)) == "5040"
assert str(factorial(n)) == "n!"
assert str(factorial(2*n)) == "(2*n)!"
assert str(factorial(factorial(n))) == '(n!)!'
assert str(factorial(factorial2(n))) == '(n!!)!'
assert str(factorial2(factorial(n))) == '(n!)!!'
assert str(factorial2(factorial2(n))) == '(n!!)!!'
def test_Function():
f = Function('f')
fx = f(x)
w = WildFunction('w')
assert str(f) == "f"
assert str(fx) == "f(x)"
assert str(w) == "w_"
def test_Geometry():
assert sstr(Point(0,0)) == 'Point(0, 0)'
assert sstr(Circle(Point(0,0), 3)) == 'Circle(Point(0, 0), 3)'
# TODO test other Geometry entities
def test_GoldenRatio():
assert str(GoldenRatio) == "GoldenRatio"
def test_ImaginaryUnit():
assert str(I) == "I"
def test_Infinity():
assert str(oo) == "oo"
assert str(oo*I) == "oo*I"
def test_Integer():
assert str(Integer(-1)) == "-1"
assert str(Integer(1)) == "1"
assert str(Integer(-3)) == "-3"
assert str(Integer(0)) == "0"
assert str(Integer(25)) == "25"
def test_Integral():
assert str(Integral(sin(x), y)) == "Integral(sin(x), y)"
assert str(Integral(sin(x), (y, 0, 1))) == "Integral(sin(x), (y, 0, 1))"
def test_Interval():
a = Symbol('a', real=True)
assert str(Interval(0, a)) == "[0, a]"
assert str(Interval(0, a, False, False)) == "[0, a]"
assert str(Interval(0, a, True, False)) == "(0, a]"
assert str(Interval(0, a, False, True)) == "[0, a)"
assert str(Interval(0, a, True, True)) == "(0, a)"
def test_Lambda():
assert str(Lambda(d, d**2)) == "Lambda(_d, _d**2)"
def test_Limit():
assert str(Limit(sin(x)/x, x, y)) == "Limit(sin(x)/x, x, y)"
assert str(Limit(1/x, x, 0)) == "Limit(1/x, x, 0)"
assert str(Limit(sin(x)/x, x, y, dir="-")) == "Limit(sin(x)/x, x, y, dir='-')"
def test_list():
assert str([x]) == sstr([x]) == "[x]"
assert str([x**2, x*y+1]) == sstr([x**2, x*y+1]) == "[x**2, x*y + 1]"
assert str([x**2, [y+x]]) == sstr([x**2, [y+x]]) == "[x**2, [x + y]]"
def test_Matrix():
M = Matrix([[x**+1, 1], [y, x+y]])
assert str(M) == sstr(M) == "[x, 1]\n[y, x + y]"
M = Matrix()
assert str(M) == sstr(M) == "[]"
M = Matrix(0, 1, lambda i, j: 0)
assert str(M) == sstr(M) == "[]"
def test_Mul():
assert str(x/y) == "x/y"
assert str(y/x) == "y/x"
assert str(x/y/z) == "x/(y*z)"
assert str((x+1)/(y+2)) == "(x + 1)/(y + 2)"
assert str(2*x/3) == '2*x/3'
assert str(-2*x/3) == '-2*x/3'
class CustomClass1(Expr):
is_commutative = True
class CustomClass2(Expr):
is_commutative = True
cc1 = CustomClass1()
cc2 = CustomClass2()
assert str(Rational(2)*cc1) == '2*CustomClass1()'
assert str(cc1*Rational(2)) == '2*CustomClass1()'
assert str(cc1*Float("1.5")) == '1.5*CustomClass1()'
assert str(cc2*Rational(2)) == '2*CustomClass2()'
assert str(cc2*Rational(2)*cc1) == '2*CustomClass1()*CustomClass2()'
assert str(cc1*Rational(2)*cc2) == '2*CustomClass1()*CustomClass2()'
def test_NaN():
assert str(nan) == "nan"
def test_NegativeInfinity():
assert str(-oo) == "-oo"
def test_Normal():
assert str(Normal(x+y, z)) == "Normal(x + y, z)"
def test_Order():
assert str(O(x)) == "O(x)"
assert str(O(x**2)) == "O(x**2)"
assert str(O(x*y)) == "O(x*y, x, y)"
def test_Permutation_Cycle():
from sympy.combinatorics import Permutation, Cycle
# general principle: economically, canonically show all moved elements
# and the size of the permutation.
for p, s in [
(Cycle(),
'Cycle()'),
(Cycle(2),
'Cycle(2)'),
(Cycle(2, 1),
'Cycle(1, 2)'),
(Cycle(1, 2)(5)(6, 7)(10),
'Cycle(1, 2)(6, 7)(10)'),
(Cycle(3, 4)(1, 2)(3, 4),
'Cycle(1, 2)(4)'),
]:
assert str(p) == s
Permutation.print_cyclic = False
for p, s in [
(Permutation([]),
'Permutation([])'),
(Permutation([], size=1),
'Permutation([0])'),
(Permutation([], size=2),
'Permutation([0, 1])'),
(Permutation([], size=10),
'Permutation([], size=10)'),
(Permutation([1, 0, 2]),
'Permutation([1, 0, 2])'),
(Permutation([1, 0, 2, 3, 4, 5]),
'Permutation([1, 0], size=6)'),
(Permutation([1, 0, 2, 3, 4, 5], size=10),
'Permutation([1, 0], size=10)'),
]:
assert str(p) == s
Permutation.print_cyclic = True
for p, s in [
(Permutation([]),
'Permutation()'),
(Permutation([], size=1),
'Permutation(0)'),
(Permutation([], size=2),
'Permutation(1)'),
(Permutation([], size=10),
'Permutation(9)'),
(Permutation([1, 0, 2]),
'Permutation(2)(0, 1)'),
(Permutation([1, 0, 2, 3, 4, 5]),
'Permutation(5)(0, 1)'),
(Permutation([1, 0, 2, 3, 4, 5], size=10),
'Permutation(9)(0, 1)'),
(Permutation([0, 1, 3, 2, 4, 5], size=10),
'Permutation(9)(2, 3)'),
]:
assert str(p) == s
def test_Pi():
assert str(pi) == "pi"
def test_Poly():
assert str(Poly(0, x)) == "Poly(0, x, domain='ZZ')"
assert str(Poly(1, x)) == "Poly(1, x, domain='ZZ')"
assert str(Poly(x, x)) == "Poly(x, x, domain='ZZ')"
assert str(Poly(2*x + 1, x)) == "Poly(2*x + 1, x, domain='ZZ')"
assert str(Poly(2*x - 1, x)) == "Poly(2*x - 1, x, domain='ZZ')"
assert str(Poly(-1, x)) == "Poly(-1, x, domain='ZZ')"
assert str(Poly(-x, x)) == "Poly(-x, x, domain='ZZ')"
assert str(Poly(-2*x + 1, x)) == "Poly(-2*x + 1, x, domain='ZZ')"
assert str(Poly(-2*x - 1, x)) == "Poly(-2*x - 1, x, domain='ZZ')"
assert str(Poly(x - 1, x)) == "Poly(x - 1, x, domain='ZZ')"
assert str(Poly(x**2 + 1 + y, x)) == "Poly(x**2 + y + 1, x, domain='ZZ[y]')"
assert str(Poly(x**2 - 1 + y, x)) == "Poly(x**2 + y - 1, x, domain='ZZ[y]')"
assert str(Poly(x**2 + I*x, x)) == "Poly(x**2 + I*x, x, domain='EX')"
assert str(Poly(x**2 - I*x, x)) == "Poly(x**2 - I*x, x, domain='EX')"
assert str(Poly(-x*y*z + x*y - 1, x, y, z)) == "Poly(-x*y*z + x*y - 1, x, y, z, domain='ZZ')"
assert str(Poly(-w*x**21*y**7*z + (1 + w)*z**3 - 2*x*z + 1, x, y, z)) == \
"Poly(-w*x**21*y**7*z - 2*x*z + (w + 1)*z**3 + 1, x, y, z, domain='ZZ[w]')"
assert str(Poly(x**2 + 1, x, modulus=2)) == "Poly(x**2 + 1, x, modulus=2)"
assert str(Poly(2*x**2 + 3*x + 4, x, modulus=17)) == "Poly(2*x**2 + 3*x + 4, x, modulus=17)"
def test_Pow():
assert str(x**-1) == "1/x"
assert str(x**-2) == "x**(-2)"
assert str(x**2) == "x**2"
assert str((x+y)**-1) == "1/(x + y)"
assert str((x+y)**-2) == "(x + y)**(-2)"
assert str((x+y)**2) == "(x + y)**2"
assert str((x+y)**(1+x)) == "(x + y)**(x + 1)"
assert str(x**Rational(1, 3)) == "x**(1/3)"
assert str(1/x**Rational(1, 3)) == "x**(-1/3)"
assert str(sqrt(sqrt(x))) == "x**(1/4)"
assert str(x**-1.0) == '1/x'
def test_sqrt():
assert str(sqrt(x)) == "sqrt(x)"
assert str(sqrt(x**2)) == "sqrt(x**2)"
assert str(1/sqrt(x)) == "1/sqrt(x)"
assert str(1/sqrt(x**2)) == "1/sqrt(x**2)"
assert str(y/sqrt(x)) == "y/sqrt(x)"
assert str(x**(1/2)) == "x**0.5"
assert str(1/x**(1/2)) == "x**(-0.5)"
def test_Rational():
n1 = Rational(1,4)
n2 = Rational(1,3)
n3 = Rational(2,4)
n4 = Rational(2,-4)
n5 = Rational(0)
n6 = Rational(1)
n7 = Rational(3)
n8 = Rational(-3)
assert str(n1*n2) == "1/12"
assert str(n1*n2) == "1/12"
assert str(n3) == "1/2"
assert str(n1*n3) == "1/8"
assert str(n1+n3) == "3/4"
assert str(n1+n2) == "7/12"
assert str(n1+n4) == "-1/4"
assert str(n4*n4) == "1/4"
assert str(n4+n2) == "-1/6"
assert str(n4+n5) == "-1/2"
assert str(n4*n5) == "0"
assert str(n3+n4) == "0"
assert str(n1**n7) == "1/64"
assert str(n2**n7) == "1/27"
assert str(n2**n8) == "27"
assert str(n7**n8) == "1/27"
assert str(Rational("-25")) == "-25"
assert str(Rational("1.25")) == "5/4"
assert str(Rational("-2.6e-2")) == "-13/500"
assert str(S("25/7")) == "25/7"
assert str(S("-123/569")) == "-123/569"
assert str(S("0.1[23]", rational=1)) == "61/495"
assert str(S("5.1[666]", rational=1)) == "31/6"
assert str(S("-5.1[666]", rational=1)) == "-31/6"
assert str(S("0.[9]", rational=1)) == "1"
assert str(S("-0.[9]", rational=1)) == "-1"
assert str(sqrt(Rational(1,4))) == "1/2"
assert str(sqrt(Rational(1,36))) == "1/6"
assert str((123**25) ** Rational(1,25)) == "123"
assert str((123**25+1)**Rational(1,25)) != "123"
assert str((123**25-1)**Rational(1,25)) != "123"
assert str((123**25-1)**Rational(1,25)) != "122"
assert str(sqrt(Rational(81,36))**3) == "27/8"
assert str(1/sqrt(Rational(81,36))**3) == "8/27"
assert str(sqrt(-4)) == str(2*I)
assert str(2**Rational(1,10**10)) == "2**(1/10000000000)"
def test_Float():
# NOTE prec is the whole number of decimal digits
assert str(Float('1.23', prec=1+2)) == '1.23'
assert str(Float('1.23456789', prec=1+8)) == '1.23456789'
assert str(Float('1.234567890123456789', prec=1+18)) == '1.234567890123456789'
assert str(pi.evalf(1+2)) == '3.14'
assert str(pi.evalf(1+14)) == '3.14159265358979'
assert str(pi.evalf(1+64)) == '3.1415926535897932384626433832795028841971693993751058209749445923'
assert str(pi.round(-1)) == '0.'
assert str((pi**400 - (pi**400).round(1)).n(2)) == '-0.e+88'
def test_Relational():
assert str(Rel(x, y, "<")) == "x < y"
assert str(Rel(x+y, y, "==")) == "x + y == y"
def test_RootOf():
assert str(RootOf(x**5 + 2*x - 1, 0)) == "RootOf(x**5 + 2*x - 1, 0)"
def test_RootSum():
f = x**5 + 2*x - 1
assert str(RootSum(f, Lambda(z, z), auto=False)) == "RootSum(x**5 + 2*x - 1)"
assert str(RootSum(f, Lambda(z, z**2), auto=False)) == "RootSum(x**5 + 2*x - 1, Lambda(_z, _z**2))"
def test_GroebnerBasis():
assert str(groebner([], x, y)) == "GroebnerBasis([], x, y, domain='ZZ', order='lex')"
F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
assert str(groebner(F, order='grlex')) == \
"GroebnerBasis([x**2 - x - 3*y + 1, y**2 - 2*x + y - 1], x, y, domain='ZZ', order='grlex')"
assert str(groebner(F, order='lex')) == \
"GroebnerBasis([2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7], x, y, domain='ZZ', order='lex')"
def test_Sample():
assert str(Sample([x, y, 1])) in [
"Sample([x, y, 1])",
"Sample([y, 1, x])",
"Sample([1, x, y])",
"Sample([y, x, 1])",
"Sample([x, 1, y])",
"Sample([1, y, x])",
]
def test_set():
assert sstr(set()) == 'set()'
assert sstr(frozenset()) == 'frozenset()'
assert sstr(set([1,2,3]))== 'set([1, 2, 3])'
assert sstr(set([1,x,x**2,x**3,x**4])) == 'set([1, x, x**2, x**3, x**4])'
def test_SparseMatrix():
M = SparseMatrix([[x**+1, 1], [y, x+y]])
assert str(M) == sstr(M) == "[x, 1]\n[y, x + y]"
def test_Sum():
assert str(summation(cos(3*z), (z, x, y))) == "Sum(cos(3*z), (z, x, y))"
assert str(Sum(x*y**2, (x, -2, 2), (y, -5, 5))) == \
"Sum(x*y**2, (x, -2, 2), (y, -5, 5))"
def test_Symbol():
assert str(y) == "y"
assert str(x) == "x"
e = x
assert str(e) == "x"
def test_tuple():
assert str((x,)) == sstr((x,)) == "(x,)"
assert str((x+y, 1+x)) == sstr((x+y, 1+x)) == "(x + y, x + 1)"
assert str((x+y, (1+x, x**2))) == sstr((x+y, (1+x, x**2))) == "(x + y, (x + 1, x**2))"
def test_Uniform():
assert str(Uniform(x, y)) == "Uniform(x, y)"
assert str(Uniform(x+y, y)) == "Uniform(x + y, y)"
def test_Unit():
assert str(second) == "s"
assert str(joule) == "kg*m**2/s**2" # issue 2461
def test_wild_str():
# Check expressions containing Wild not causing infinite recursion
w = Wild('x')
assert str(w + 1) == 'x_ + 1'
assert str(exp(2**w) + 5) == 'exp(2**x_) + 5'
assert str(3*w + 1) == '3*x_ + 1'
assert str(1/w + 1) == '1 + 1/x_'
assert str(w**2 + 1) == 'x_**2 + 1'
assert str(1/(1-w)) == '1/(-x_ + 1)'
def test_zeta():
assert str(zeta(3)) == "zeta(3)"
def test_bug2():
e = x-y
a = str(e)
b = str(e)
assert a == b
def test_bug4():
e = -2*sqrt(x)-y/sqrt(x)/2
assert str(e) not in ["(-2)*x**1/2(-1/2)*x**(-1/2)*y",
"-2*x**1/2(-1/2)*x**(-1/2)*y","-2*x**1/2-1/2*x**-1/2*w"]
assert str(e) == "-2*sqrt(x) - y/(2*sqrt(x))"
def test_issue922():
e = Integral(x,x) + 1
assert str(e) == 'Integral(x, x) + 1'
def test_sstrrepr():
assert sstr('abc') == 'abc'
assert sstrrepr('abc') == "'abc'"
e = ['a', 'b', 'c', x]
assert sstr(e) == "[a, b, c, x]"
assert sstrrepr(e) == "['a', 'b', 'c', x]"
def test_infinity():
assert sstr(oo*I) == "oo*I"
def test_full_prec():
assert sstr(S("0.3"), full_prec=True) == "0.300000000000000"
assert sstr(S("0.3"), full_prec="auto") == "0.300000000000000"
assert sstr(S("0.3"), full_prec=False) == "0.3"
assert sstr(S("0.3")*x, full_prec=True) in [
"0.300000000000000*x",
"x*0.300000000000000"
]
assert sstr(S("0.3")*x, full_prec="auto") in [
"0.3*x",
"x*0.3"
]
assert sstr(S("0.3")*x, full_prec=False) in [
"0.3*x",
"x*0.3"
]
def test_noncommutative():
A, B, C = symbols('A,B,C', commutative=False)
assert sstr(A*B*C**-1) == "A*B*C**(-1)"
assert sstr(C**-1*A*B) == "C**(-1)*A*B"
assert sstr(A*C**-1*B) == "A*C**(-1)*B"
assert sstr(sqrt(A)) == "sqrt(A)"
assert sstr(1/sqrt(A)) == "A**(-1/2)"
def test_empty_printer():
str_printer = StrPrinter()
assert str_printer.emptyPrinter("foo") == "foo"
assert str_printer.emptyPrinter(x*y) == "x*y"
assert str_printer.emptyPrinter(32) == "32"
def test_settings():
raises(TypeError, lambda: sstr(S(4), method="garbage"))
def test_RandomDomain():
from sympy.stats import Normal, Die, Exponential, pspace, where
X = Normal('x1', 0, 1)
assert str(where(X>0)) == "Domain: 0 < x1"
D = Die('d1', 6)
assert str(where(D>4)) == "Domain: Or(d1 == 5, d1 == 6)"
A = Exponential('a', 1)
B = Exponential('b', 1)
assert str(pspace(Tuple(A,B)).domain) =="Domain: And(0 <= a, 0 <= b)"
def test_FiniteSet():
assert str(FiniteSet(range(1, 51))) == '{1, 2, 3, ..., 48, 49, 50}'
assert str(FiniteSet(range(1, 6))) == '{1, 2, 3, 4, 5}'
def test_PrettyPoly():
from sympy.polys.domains import QQ
F = QQ.frac_field(x, y)
R = QQ[x, y]
assert sstr(F.convert(x/(x + y))) == sstr(x/(x + y))
assert sstr(R.convert(x + y)) == sstr(x + y)
def test_categories():
from sympy.categories import (Object, Morphism, NamedMorphism,
IdentityMorphism, Category)
A = Object("A")
B = Object("B")
f = NamedMorphism(A, B, "f")
id_A = IdentityMorphism(A)
K = Category("K")
assert str(A) == 'Object("A")'
assert str(f) == 'NamedMorphism(Object("A"), Object("B"), "f")'
assert str(id_A) == 'IdentityMorphism(Object("A"))'
assert str(K) == 'Category("K")'
def test_Tr():
A, B = symbols('A B', commutative=False)
t = Tr(A*B)
assert str(t) == 'Tr(A*B)'
|
# model settings
_base_ = [
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
type='DSC',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='DSCRoIHead',
num_stages=3,
stage_loss_weights=[dict(loss_mpn=1, loss_bbox=1, loss_cls=1),
dict(loss_mpn=1, loss_bbox=0.5, loss_cls=1),
dict(loss_mpn=1, loss_bbox=0.5, loss_cls=1),
dict(loss_mask=1)],
semantic_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=0),
out_channels=256,
featmap_strides=[8]),
semantic_head=dict(
type='FusedSemanticHead',
num_ins=5,
fusion_level=1,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
ignore_label=255,
loss_weight=0.2),
relative_roi_extractor=dict(
type='RelativeRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=0),
out_channels=256,
featmap_strides=[1.0]),
mpn_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mpn=[
dict(
type='DSCMaskHead',
with_conv_res=False,
num_convs=4,
in_channels=256,
conv_out_channels=256,
class_agnostic=True,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='DSCMaskHead',
with_conv_res=True,
num_convs=4,
in_channels=256,
conv_out_channels=256,
class_agnostic=True,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
dict(
type='DSCMaskHead',
with_conv_res=True,
num_convs=4,
in_channels=256,
conv_out_channels=256,
class_agnostic=True,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))],
bbox_roi_extractor=dict(
type='SgSingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared2FCDSCBBoxHead',
conv_res=1,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCDSCBBoxHead',
conv_res=1,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCDSCBBoxHead',
conv_res=1,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='DSCMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=14,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='PseudoSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=False),
mask_size=14,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='PseudoSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=False),
mask_size=14,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='PseudoSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=False),
mask_size=28,
pos_weight=-1,
debug=False),
])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.001,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100,
mask_thr_binary=0.5))
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='SegRescale', scale_factor=1 / 8),
dict(type='DefaultFormatBundle'),
dict(
type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(
seg_prefix=data_root + 'stuffthingmaps/train2017/',
pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
],
),
]
|
"""This file is needed for editable installs (`pip install -e .`).
Can be removed once the following is resolved
https://github.com/pypa/packaging-problems/issues/256
"""
from setuptools import setup
setup()
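# With this stub in place an editable install works as usual from the project
# root:
#
#     pip install -e .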
|
############### Injector Parameters ##################
#
# config - config file used by the compiler pass
# funcList - list of functions that are faulty
# prob - probability that an instruction is faulty
# byte - which byte is faulty (0-7); -1 = random
# singleInj - one injection per active rank (0 or 1)
# ptr - add code to inject into pointers (0 or 1)
# arith - add code to inject into mathematics (0 or 1)
# ctrl - add code to inject into control (0 or 1)
# stateFile - unique counter for fault site index;
# should differ based on application
#
#####################################################
config = "jacobi.config"
funcList = "\"\""
prob = 1e-5
byte = -1
singleInj = 1
ptr = 1
arith = 1
ctrl = 1
stateFile = "jacobi"
############# Library Parameters #####################
#
# FLIPIT_PATH - Path to FlipIt repo
# SHOW - libraries and paths wrapped by mpicc
#
#####################################################
import os
FLIPIT_PATH = os.environ['FLIPIT_PATH']
LLVM_BUILD_PATH = os.environ['LLVM_BUILD_PATH']
SHOW = ""
########### Files to NOT inject inside ###############
notInject = [" ", " "]
############ Default Compiler #################
cc = "mpicc"
############ Verbose compiler output ##############
verbose = False
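# Illustrative addition, not part of the original FlipIt configuration: echo
# the active injection settings so a build log records what was configured
# above (only prints when verbose is enabled).
if verbose:
    print("FlipIt settings: config=%s prob=%g byte=%d singleInj=%d ptr=%d arith=%d ctrl=%d"
          % (config, prob, byte, singleInj, ptr, arith, ctrl))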
|
def num(a):
    """Return a plus the sum of its decimal digits (valid for 0 <= a <= 9999)."""
    num = int(a)
    if num < 10:
        return num + num
    elif num < 100:
        return num + num // 10 + num % 10
    elif num < 1000:
        return num + num // 100 + ((num // 10) % 10) + num % 10
    else:
        return num + num // 1000 + ((num // 100) % 10) + ((num // 10) % 10) + num % 10
# Sieve: mark every value of the form n + digit_sum(n); the indices that are
# never marked are the self numbers below 10000, which are printed at the end.
count = list(range(10000))
for i in range(10000):
    temp = num(i)
    if temp >= 10000:
        continue
    count[temp] = -1
for i in range(10000):
    if count[i] != -1:
        print(i)
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
class virwox (Exchange):
def describe(self):
return self.deep_extend(super(virwox, self).describe(), {
'id': 'virwox',
'name': 'VirWoX',
'countries': ['AT', 'EU'],
'rateLimit': 1000,
'has': {
'CORS': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766894-6da9d360-5eea-11e7-90aa-41f2711b7405.jpg',
'api': {
'public': 'https://api.virwox.com/api/json.php',
'private': 'https://www.virwox.com/api/trading.php',
},
'www': 'https://www.virwox.com',
'doc': 'https://www.virwox.com/developers.php',
},
'requiredCredentials': {
'apiKey': True,
'secret': False,
'login': True,
'password': True,
},
'api': {
'public': {
'get': [
'getInstruments',
'getBestPrices',
'getMarketDepth',
'estimateMarketOrder',
'getTradedPriceVolume',
'getRawTradeData',
'getStatistics',
'getTerminalList',
'getGridList',
'getGridStatistics',
],
'post': [
'getInstruments',
'getBestPrices',
'getMarketDepth',
'estimateMarketOrder',
'getTradedPriceVolume',
'getRawTradeData',
'getStatistics',
'getTerminalList',
'getGridList',
'getGridStatistics',
],
},
'private': {
'get': [
'cancelOrder',
'getBalances',
'getCommissionDiscount',
'getOrders',
'getTransactions',
'placeOrder',
],
'post': [
'cancelOrder',
'getBalances',
'getCommissionDiscount',
'getOrders',
'getTransactions',
'placeOrder',
],
},
},
})
def fetch_markets(self, params={}):
markets = self.publicGetGetInstruments()
keys = list(markets['result'].keys())
result = []
for p in range(0, len(keys)):
market = markets['result'][keys[p]]
id = market['instrumentID']
symbol = market['symbol']
base = market['longCurrency']
quote = market['shortCurrency']
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostGetBalances()
balances = response['result']['accountList']
result = {'info': balances}
for b in range(0, len(balances)):
balance = balances[b]
currency = balance['currency']
total = balance['balance']
account = {
'free': total,
'used': 0.0,
'total': total,
}
result[currency] = account
return self.parse_balance(result)
def fetch_market_price(self, symbol, params={}):
self.load_markets()
response = self.publicPostGetBestPrices(self.extend({
'symbols': [symbol],
}, params))
result = response['result']
return {
'bid': self.safe_float(result[0], 'bestBuyPrice'),
'ask': self.safe_float(result[0], 'bestSellPrice'),
}
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'symbols': [symbol],
}
if limit is not None:
request['buyDepth'] = limit # 100
request['sellDepth'] = limit # 100
response = self.publicPostGetMarketDepth(self.extend(request, params))
orderbook = response['result'][0]
return self.parse_order_book(orderbook, None, 'buy', 'sell', 'price', 'volume')
def fetch_ticker(self, symbol, params={}):
self.load_markets()
end = self.milliseconds()
start = end - 86400000
response = self.publicGetGetTradedPriceVolume(self.extend({
'instrument': symbol,
'endDate': self.ymdhms(end),
'startDate': self.ymdhms(start),
'HLOC': 1,
}, params))
tickers = response['result']['priceVolumeList']
keys = list(tickers.keys())
length = len(keys)
lastKey = keys[length - 1]
ticker = tickers[lastKey]
timestamp = self.milliseconds()
close = self.safe_float(ticker, 'close')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': self.safe_float(ticker, 'open'),
'close': close,
'last': close,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'longVolume'),
'quoteVolume': self.safe_float(ticker, 'shortVolume'),
'info': ticker,
}
def parse_trade(self, trade, symbol=None):
sec = self.safe_integer(trade, 'time')
timestamp = sec * 1000
return {
'id': trade['tid'],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': None,
'symbol': symbol,
'type': None,
'side': None,
'price': self.safe_float(trade, 'price'),
'amount': self.safe_float(trade, 'vol'),
'fee': None,
'info': trade,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetGetRawTradeData(self.extend({
'instrument': symbol,
'timespan': 3600,
}, params))
result = self.safe_value(response, 'result', {})
trades = self.safe_value(result, 'data', [])
return self.parse_trades(trades, market)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'instrument': market['symbol'],
'orderType': side.upper(),
'amount': amount,
}
if type == 'limit':
request['price'] = price
response = self.privatePostPlaceOrder(self.extend(request, params))
return {
'info': response,
'id': self.safe_string(response['result'], 'orderID'),
}
def cancel_order(self, id, symbol=None, params={}):
request = {
'orderID': id,
}
return self.privatePostCancelOrder(self.extend(request, params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api]
auth = {}
if api == 'private':
self.check_required_credentials()
auth['key'] = self.apiKey
auth['user'] = self.login
auth['pass'] = self.password
nonce = self.nonce()
if method == 'GET':
url += '?' + self.urlencode(self.extend({
'method': path,
'id': nonce,
}, auth, params))
else:
headers = {'Content-Type': 'application/json'}
body = self.json({
'method': path,
'params': self.extend(auth, params),
'id': nonce,
})
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response):
if code == 200:
if (body[0] == '{') or (body[0] == '['):
if 'result' in response:
result = response['result']
if 'errorCode' in result:
errorCode = result['errorCode']
if errorCode != 'OK':
raise ExchangeError(self.id + ' error returned: ' + body)
else:
raise ExchangeError(self.id + ' malformed response: no result in response: ' + body)
else:
# if not a JSON response
raise ExchangeError(self.id + ' returned a non-JSON reply: ' + body)
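# Illustrative usage sketch only; not part of the generated ccxt module above
# (the file is auto-generated, and VirWoX itself has ceased operation, so any
# live request would fail). It merely shows how the adapter is constructed.
if __name__ == '__main__':
    description = virwox().describe()
    print(description['id'], description['urls']['api'])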
|
from marshmallow import fields
from .field_set import FieldSet, FieldSetSchema
class Destination(FieldSet):
def __init__(self,
address: str = None,
bytes: int = None,
domain: str = None,
ip: str = None,
mac: str = None,
packets: int = None,
port: int = None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.address = address
self.bytes = bytes
self.domain = domain
self.ip = ip
self.mac = mac
self.packets = packets
self.port = port
class DestinationSchema(FieldSetSchema):
address = fields.String()
bytes = fields.Integer()
domain = fields.String()
ip = fields.String()
mac = fields.String()
packets = fields.Integer()
port = fields.Integer()
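# Usage sketch, assuming FieldSet and FieldSetSchema add no required
# constructor arguments of their own:
#
#     dest = Destination(ip="10.0.0.5", port=443, packets=12)
#     data = DestinationSchema().dump(dest)  # -> dict of the populated fields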
|
import nltk.corpus
import nltk.tokenize.punkt
import nltk.stem.snowball
from nltk.corpus import wordnet
import string
paths = nltk.data.path
#if '/var/nltk_data' not in paths:
# nltk.data.path.append("/var/nltk_data");
# if '/mnt/vol1/nltk_data' not in paths:
# nltk.data.path.append("/mnt/vol1/nltk_data");
#nltk.download('stopwords')
stopwords = nltk.corpus.stopwords.words('english')
stopwords.extend(string.punctuation)
stopwords.append('')
# Based on tornado.ioloop.IOLoop.instance() approach.
# See https://github.com/facebook/tornado
#wordnetConst = {"J":wordnet.ADJ,"V":wordnet.VERB,"N":wordnet.NOUN,"R":wordnet.ADV}
# hard-coded equivalent of the wordnet POS mapping commented out above
wordnetConst = {"J":u'a',"V":u'v',"N":u'n',"R":u'r'}
get_wordnet_pos = lambda x: (x[0],wordnetConst.get(x[1][0],wordnet.NOUN))
tokenizer = nltk.tokenize.TreebankWordTokenizer()
lemmatizer = nltk.stem.wordnet.WordNetLemmatizer()
lemmatize = lemmatizer.lemmatize
def get_proximity_match_ratio(a, b, threshold=0.8):
"""Check if a and b are matches."""
pos_a = map(get_wordnet_pos,nltk.pos_tag(tokenizer.tokenize(a)))
pos_b = map(get_wordnet_pos,nltk.pos_tag(tokenizer.tokenize(b)))
lemmae_a = [lemmatize(token.lower().strip(string.punctuation), pos) for token, pos in pos_a \
if token.lower().strip(string.punctuation) not in stopwords]
lemmae_b = [lemmatize(token.lower().strip(string.punctuation), pos) for token, pos in pos_b \
if token.lower().strip(string.punctuation) not in stopwords]
# Calculate Jaccard similarity
ratio = len(set(lemmae_a).intersection(lemmae_b)) / float(len(set(lemmae_a).union(lemmae_b)))
#return (ratio >= threshold)
if ratio <= 0.5 and ratio > 0.3:
from difflib import SequenceMatcher
ratio = SequenceMatcher(None, a, b).ratio()
words_in_v1 = a.split()
words_in_v2 = b.split()
word_match = 0
for w1 in words_in_v1:
if w1 in words_in_v2:
word_match = word_match + 1
if word_match != 0:
if len(words_in_v1) >= len(words_in_v2):
r = float(word_match)/len(words_in_v1)
elif len(words_in_v1) < len(words_in_v2):
r = float(word_match)/len(words_in_v2)
if r > ratio:
ratio = r
return ratio
import time
if __name__ == "__main__":
t1 = time.time()
print 'Nikon Coolpix L31 Point & Shoot Camera',"|",'Nikon Coolpix L31 Point & Shoot Camera(Black)'
print get_proximity_match_ratio('Nikon Coolpix L31 Point & Shoot Camera', 'Nikon Coolpix L31 Point & Shoot Camera(Black)')
print "time>",time.time()-t1
print "------------------------------------------"
t1 = time.time()
print 'I have 5.5" Inch product',"|",'my product 5.5 Inches Inch'
print get_proximity_match_ratio('I have 5.5" Inch product', 'my product 5.5 Inches Inch')
print "time>",time.time()-t1
print "------------------------------------------"
t1 = time.time()
print 'iphone 6s cover for me',"|",'i have phone for me'
print get_proximity_match_ratio('iphone 6s cover for me', 'i have phone for me')
print "time>",time.time()-t1
print get_proximity_match_ratio('Nikon Coolpix L31 Point & Shoot Camera', 'Nikon Coolpix L31 Point & Shoot Camera(Black)')
print get_proximity_match_ratio('In the eighteenth century it was often convenient to regard man as a clockwork automaton.', 'In the 1700s, it was common to regard man as a clockwork automaton.')
print get_proximity_match_ratio('SAMSUNG 9000 mAh for Smart Phone', 'SAMSUNG 9000 mAh for Smart Phone And Tablets')
print get_proximity_match_ratio('5.5" Inch', '5.5 Inches Inch')
print get_proximity_match_ratio('mahalaxmi gold coin', 'rose gold coin')
print get_proximity_match_ratio('10 gms silver coin of lakshmi ganesh.. ', 'silver currency note of 1000 rs.')
    # NOTE: breakStringInTOWords() is not defined anywhere in this module, so
    # the original call below would raise a NameError; kept here for reference.
    # print breakStringInTOWords('Samsung Galaxy Tab 3 T211 Tablet (White)')
print get_proximity_match_ratio('sony 1tb normal external hard drive (black)', 'sony 1tb wired external hard drive ()')
|
# Generated by Django 2.2.24 on 2021-08-24 10:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vespawatch', '0052_auto_20210824_1131'),
]
operations = [
migrations.AddField(
model_name='managementaction',
name='result',
field=models.CharField(choices=[('ST', 'Successfully treated'), ('UT', 'Unsuccessfully treated'), ('UN', 'Untreated'), ('UK', 'UK')], default='UK', max_length=3, verbose_name='Result'),
),
]
|
# coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. 
| `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. 
### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). 
The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. 
| | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. | Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. 
This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-like` ` not-like` | | `container-status` | `is` ` is-not` | | `containers` | `are` | | `criticality-tag` | `is` ` is-not` ` is-greater-than` ` is-less-than` ` is-applied` ` is-not-applied` | | `custom-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `cve` | `is` ` is-not` ` contains` ` does-not-contain` | | `cvss-access-complexity` | `is` ` is-not` | | `cvss-authentication-required` | `is` ` is-not` | | `cvss-access-vector` | `is` ` is-not` | | `cvss-availability-impact` | `is` ` is-not` | | `cvss-confidentiality-impact` | `is` ` is-not` | | `cvss-integrity-impact` | `is` ` is-not` | | `cvss-v3-confidentiality-impact` | `is` ` is-not` | | `cvss-v3-integrity-impact` | `is` ` is-not` | | `cvss-v3-availability-impact` | `is` ` is-not` | | `cvss-v3-attack-vector` | `is` ` is-not` | | `cvss-v3-attack-complexity` | `is` ` is-not` | | `cvss-v3-user-interaction` | `is` ` is-not` | | `cvss-v3-privileges-required` | `is` ` is-not` | | `host-name` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-empty` ` is-not-empty` ` is-like` ` not-like` | | `host-type` | `in` ` not-in` | | `ip-address` | `is` ` is-not` ` in-range` ` not-in-range` ` is-like` ` not-like` | | `ip-address-type` | `in` ` not-in` | | `last-scan-date` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `location-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `open-ports` | `is` ` is-not` ` in-range` | | `operating-system` | `contains` ` does-not-contain` ` is-empty` ` is-not-empty` | | `owner-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is-not` ` in-range` ` greater-than` ` less-than` | | `service-name` | `contains` ` does-not-contain` | | `site-id` | `in` ` not-in` | | `software` | `contains` ` does-not-contain` | | `vAsset-cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | 
`vAsset-datacenter` | `is` ` is-not` | | `vAsset-host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `vAsset-power-state` | `in` ` not-in` | | `vAsset-resource-pool-path` | `contains` ` does-not-contain` | | `vulnerability-assessed` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vulnerability-category` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` | | `vulnerability-cvss-v3-score` | `is` ` is-not` | | `vulnerability-cvss-score` | `is` ` is-not` ` in-range` ` is-greater-than` ` is-less-than` | | `vulnerability-exposures` | `includes` ` does-not-include` | | `vulnerability-title` | `contains` ` does-not-contain` ` is` ` is-not` ` starts-with` ` ends-with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | `cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | 
----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `numeric` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
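Tying these conventions together, the following is a minimal client-side sketch (illustrative only; it is not part of the generated SDK or of the API itself). It assumes the `rel`/`href` link-object form described under HATEOAS, uses the `requests` library purely for demonstration, and the base URL, credentials, field names and values are placeholders:

```python
import requests  # any HTTP client works; used here only for illustration

BASE = 'https://console.example.com/api/3'  # hypothetical base URL
AUTH = ('user', 'password')                 # placeholder credentials

def href_for(resource, relation):
    # Resolve a hypermedia relation to its href, or None if the resource
    # does not advertise that relation.
    for link in resource.get('links', []):
        if link.get('rel') == relation:
            return link.get('href')
    return None

def iter_pages(url):
    # Walk a paginated collection by following the 'next' link; the last
    # page advertises no 'next' relation, which ends the loop.
    while url:
        page = requests.get(url, auth=AUTH).json()
        for resource in page.get('resources', []):
            yield resource
        url = href_for(page, 'next')

# A search-criteria array as described above: each filter names a field,
# an operator and the value(s) that operator requires.
criteria = [
    {'field': 'host-name', 'operator': 'starts-with', 'value': 'web'},
    {'field': 'risk-score', 'operator': 'in-range', 'lower': 1000, 'upper': 5000},
]
```

The `criteria` array would be supplied to whichever endpoint accepts search criteria; consult the individual resource documentation for the exact request shape.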
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from py_insightvm_sdk.models.link import Link # noqa: F401,E501
from py_insightvm_sdk.models.vulnerability_validation_resource import VulnerabilityValidationResource # noqa: F401,E501
class ResourcesVulnerabilityValidationResource(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'links': 'list[Link]',
'resources': 'list[VulnerabilityValidationResource]'
}
attribute_map = {
'links': 'links',
'resources': 'resources'
}
def __init__(self, links=None, resources=None): # noqa: E501
"""ResourcesVulnerabilityValidationResource - a model defined in Swagger""" # noqa: E501
self._links = None
self._resources = None
self.discriminator = None
if links is not None:
self.links = links
if resources is not None:
self.resources = resources
@property
def links(self):
"""Gets the links of this ResourcesVulnerabilityValidationResource. # noqa: E501
Hypermedia links to corresponding or related resources. # noqa: E501
:return: The links of this ResourcesVulnerabilityValidationResource. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this ResourcesVulnerabilityValidationResource.
Hypermedia links to corresponding or related resources. # noqa: E501
:param links: The links of this ResourcesVulnerabilityValidationResource. # noqa: E501
:type: list[Link]
"""
self._links = links
@property
def resources(self):
"""Gets the resources of this ResourcesVulnerabilityValidationResource. # noqa: E501
The resources returned. # noqa: E501
:return: The resources of this ResourcesVulnerabilityValidationResource. # noqa: E501
:rtype: list[VulnerabilityValidationResource]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this ResourcesVulnerabilityValidationResource.
The resources returned. # noqa: E501
:param resources: The resources of this ResourcesVulnerabilityValidationResource. # noqa: E501
:type: list[VulnerabilityValidationResource]
"""
self._resources = resources
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResourcesVulnerabilityValidationResource, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourcesVulnerabilityValidationResource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
# logcall.py
from functools import wraps
def logged(func):
# Idea: Give me a function, I'll put logging
# around it
print('Adding logging to', func.__name__)
@wraps(func)
def wrapper(*args, **kwargs):
print('You called', func.__name__)
return func(*args, **kwargs)
return wrapper
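# Usage sketch (illustrative): decorating a function with @logged prints a
# message once at decoration time and announces every subsequent call.
if __name__ == '__main__':
    @logged
    def add(x, y):
        return x + y
    # Decoration printed 'Adding logging to add'; the call below prints
    # 'You called add' and then 5.
    print(add(2, 3))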
|
from sympy.utilities.pytest import XFAIL, raises
from sympy import (S, Symbol, symbols, nan, oo, I, pi, Float, And, Or, Not,
Implies, Xor, zoo, sqrt, Rational, simplify, Function, floor, ceiling)
from sympy.core.compatibility import range
from sympy.core.relational import (Relational, Equality, Unequality,
GreaterThan, LessThan, StrictGreaterThan,
StrictLessThan, Rel, Eq, Lt, Le,
Gt, Ge, Ne)
from sympy.sets.sets import Interval, FiniteSet
x, y, z, t = symbols('x,y,z,t')
def test_rel_ne():
assert Relational(x, y, '!=') == Ne(x, y)
def test_rel_subs():
e = Relational(x, y, '==')
e = e.subs(x, z)
assert isinstance(e, Equality)
assert e.lhs == z
assert e.rhs == y
e = Relational(x, y, '>=')
e = e.subs(x, z)
assert isinstance(e, GreaterThan)
assert e.lhs == z
assert e.rhs == y
e = Relational(x, y, '<=')
e = e.subs(x, z)
assert isinstance(e, LessThan)
assert e.lhs == z
assert e.rhs == y
e = Relational(x, y, '>')
e = e.subs(x, z)
assert isinstance(e, StrictGreaterThan)
assert e.lhs == z
assert e.rhs == y
e = Relational(x, y, '<')
e = e.subs(x, z)
assert isinstance(e, StrictLessThan)
assert e.lhs == z
assert e.rhs == y
e = Eq(x, 0)
assert e.subs(x, 0) is S.true
assert e.subs(x, 1) is S.false
def test_wrappers():
e = x + x**2
res = Relational(y, e, '==')
assert Rel(y, x + x**2, '==') == res
assert Eq(y, x + x**2) == res
res = Relational(y, e, '<')
assert Lt(y, x + x**2) == res
res = Relational(y, e, '<=')
assert Le(y, x + x**2) == res
res = Relational(y, e, '>')
assert Gt(y, x + x**2) == res
res = Relational(y, e, '>=')
assert Ge(y, x + x**2) == res
res = Relational(y, e, '!=')
assert Ne(y, x + x**2) == res
def test_Eq():
assert Eq(x**2) == Eq(x**2, 0)
assert Eq(x**2) != Eq(x**2, 1)
assert Eq(x, x) # issue 5719
# issue 6116
p = Symbol('p', positive=True)
assert Eq(p, 0) is S.false
def test_rel_Infinity():
# NOTE: All of these are actually handled by sympy.core.Number, and do
# not create Relational objects.
assert (oo > oo) is S.false
assert (oo > -oo) is S.true
assert (oo > 1) is S.true
assert (oo < oo) is S.false
assert (oo < -oo) is S.false
assert (oo < 1) is S.false
assert (oo >= oo) is S.true
assert (oo >= -oo) is S.true
assert (oo >= 1) is S.true
assert (oo <= oo) is S.true
assert (oo <= -oo) is S.false
assert (oo <= 1) is S.false
assert (-oo > oo) is S.false
assert (-oo > -oo) is S.false
assert (-oo > 1) is S.false
assert (-oo < oo) is S.true
assert (-oo < -oo) is S.false
assert (-oo < 1) is S.true
assert (-oo >= oo) is S.false
assert (-oo >= -oo) is S.true
assert (-oo >= 1) is S.false
assert (-oo <= oo) is S.true
assert (-oo <= -oo) is S.true
assert (-oo <= 1) is S.true
def test_bool():
assert Eq(0, 0) is S.true
assert Eq(1, 0) is S.false
assert Ne(0, 0) is S.false
assert Ne(1, 0) is S.true
assert Lt(0, 1) is S.true
assert Lt(1, 0) is S.false
assert Le(0, 1) is S.true
assert Le(1, 0) is S.false
assert Le(0, 0) is S.true
assert Gt(1, 0) is S.true
assert Gt(0, 1) is S.false
assert Ge(1, 0) is S.true
assert Ge(0, 1) is S.false
assert Ge(1, 1) is S.true
assert Eq(I, 2) is S.false
assert Ne(I, 2) is S.true
raises(TypeError, lambda: Gt(I, 2))
raises(TypeError, lambda: Ge(I, 2))
raises(TypeError, lambda: Lt(I, 2))
raises(TypeError, lambda: Le(I, 2))
a = Float('.000000000000000000001', '')
b = Float('.0000000000000000000001', '')
assert Eq(pi + a, pi + b) is S.false
def test_rich_cmp():
assert (x < y) == Lt(x, y)
assert (x <= y) == Le(x, y)
assert (x > y) == Gt(x, y)
assert (x >= y) == Ge(x, y)
def test_doit():
from sympy import Symbol
p = Symbol('p', positive=True)
n = Symbol('n', negative=True)
np = Symbol('np', nonpositive=True)
nn = Symbol('nn', nonnegative=True)
assert Gt(p, 0).doit() is S.true
assert Gt(p, 1).doit() == Gt(p, 1)
assert Ge(p, 0).doit() is S.true
assert Le(p, 0).doit() is S.false
assert Lt(n, 0).doit() is S.true
assert Le(np, 0).doit() is S.true
assert Gt(nn, 0).doit() == Gt(nn, 0)
assert Lt(nn, 0).doit() is S.false
assert Eq(x, 0).doit() == Eq(x, 0)
def test_new_relational():
x = Symbol('x')
assert Eq(x) == Relational(x, 0) # None ==> Equality
assert Eq(x) == Relational(x, 0, '==')
assert Eq(x) == Relational(x, 0, 'eq')
assert Eq(x) == Equality(x, 0)
assert Eq(x, -1) == Relational(x, -1) # None ==> Equality
assert Eq(x, -1) == Relational(x, -1, '==')
assert Eq(x, -1) == Relational(x, -1, 'eq')
assert Eq(x, -1) == Equality(x, -1)
assert Eq(x) != Relational(x, 1) # None ==> Equality
assert Eq(x) != Relational(x, 1, '==')
assert Eq(x) != Relational(x, 1, 'eq')
assert Eq(x) != Equality(x, 1)
assert Eq(x, -1) != Relational(x, 1) # None ==> Equality
assert Eq(x, -1) != Relational(x, 1, '==')
assert Eq(x, -1) != Relational(x, 1, 'eq')
assert Eq(x, -1) != Equality(x, 1)
assert Ne(x, 0) == Relational(x, 0, '!=')
assert Ne(x, 0) == Relational(x, 0, '<>')
assert Ne(x, 0) == Relational(x, 0, 'ne')
assert Ne(x, 0) == Unequality(x, 0)
assert Ne(x, 0) != Relational(x, 1, '!=')
assert Ne(x, 0) != Relational(x, 1, '<>')
assert Ne(x, 0) != Relational(x, 1, 'ne')
assert Ne(x, 0) != Unequality(x, 1)
assert Ge(x, 0) == Relational(x, 0, '>=')
assert Ge(x, 0) == Relational(x, 0, 'ge')
assert Ge(x, 0) == GreaterThan(x, 0)
assert Ge(x, 1) != Relational(x, 0, '>=')
assert Ge(x, 1) != Relational(x, 0, 'ge')
assert Ge(x, 1) != GreaterThan(x, 0)
assert (x >= 1) == Relational(x, 1, '>=')
assert (x >= 1) == Relational(x, 1, 'ge')
assert (x >= 1) == GreaterThan(x, 1)
assert (x >= 0) != Relational(x, 1, '>=')
assert (x >= 0) != Relational(x, 1, 'ge')
assert (x >= 0) != GreaterThan(x, 1)
assert Le(x, 0) == Relational(x, 0, '<=')
assert Le(x, 0) == Relational(x, 0, 'le')
assert Le(x, 0) == LessThan(x, 0)
assert Le(x, 1) != Relational(x, 0, '<=')
assert Le(x, 1) != Relational(x, 0, 'le')
assert Le(x, 1) != LessThan(x, 0)
assert (x <= 1) == Relational(x, 1, '<=')
assert (x <= 1) == Relational(x, 1, 'le')
assert (x <= 1) == LessThan(x, 1)
assert (x <= 0) != Relational(x, 1, '<=')
assert (x <= 0) != Relational(x, 1, 'le')
assert (x <= 0) != LessThan(x, 1)
assert Gt(x, 0) == Relational(x, 0, '>')
assert Gt(x, 0) == Relational(x, 0, 'gt')
assert Gt(x, 0) == StrictGreaterThan(x, 0)
assert Gt(x, 1) != Relational(x, 0, '>')
assert Gt(x, 1) != Relational(x, 0, 'gt')
assert Gt(x, 1) != StrictGreaterThan(x, 0)
assert (x > 1) == Relational(x, 1, '>')
assert (x > 1) == Relational(x, 1, 'gt')
assert (x > 1) == StrictGreaterThan(x, 1)
assert (x > 0) != Relational(x, 1, '>')
assert (x > 0) != Relational(x, 1, 'gt')
assert (x > 0) != StrictGreaterThan(x, 1)
assert Lt(x, 0) == Relational(x, 0, '<')
assert Lt(x, 0) == Relational(x, 0, 'lt')
assert Lt(x, 0) == StrictLessThan(x, 0)
assert Lt(x, 1) != Relational(x, 0, '<')
assert Lt(x, 1) != Relational(x, 0, 'lt')
assert Lt(x, 1) != StrictLessThan(x, 0)
assert (x < 1) == Relational(x, 1, '<')
assert (x < 1) == Relational(x, 1, 'lt')
assert (x < 1) == StrictLessThan(x, 1)
assert (x < 0) != Relational(x, 1, '<')
assert (x < 0) != Relational(x, 1, 'lt')
assert (x < 0) != StrictLessThan(x, 1)
# finally, some fuzz testing
from random import randint
from sympy.core.compatibility import unichr
for i in range(100):
while 1:
strtype, length = (unichr, 65535) if randint(0, 1) else (chr, 255)
relation_type = strtype(randint(0, length))
if randint(0, 1):
relation_type += strtype(randint(0, length))
if relation_type not in ('==', 'eq', '!=', '<>', 'ne', '>=', 'ge',
'<=', 'le', '>', 'gt', '<', 'lt'):
break
raises(ValueError, lambda: Relational(x, 1, relation_type))
def test_relational_bool_output():
# https://github.com/sympy/sympy/issues/5931
raises(TypeError, lambda: bool(x > 3))
raises(TypeError, lambda: bool(x >= 3))
raises(TypeError, lambda: bool(x < 3))
raises(TypeError, lambda: bool(x <= 3))
raises(TypeError, lambda: bool(Eq(x, 3)))
raises(TypeError, lambda: bool(Ne(x, 3)))
def test_relational_logic_symbols():
# See issue 6204
assert (x < y) & (z < t) == And(x < y, z < t)
assert (x < y) | (z < t) == Or(x < y, z < t)
assert ~(x < y) == Not(x < y)
assert (x < y) >> (z < t) == Implies(x < y, z < t)
assert (x < y) << (z < t) == Implies(z < t, x < y)
assert (x < y) ^ (z < t) == Xor(x < y, z < t)
assert isinstance((x < y) & (z < t), And)
assert isinstance((x < y) | (z < t), Or)
assert isinstance(~(x < y), GreaterThan)
assert isinstance((x < y) >> (z < t), Implies)
assert isinstance((x < y) << (z < t), Implies)
assert isinstance((x < y) ^ (z < t), (Or, Xor))
def test_univariate_relational_as_set():
assert (x > 0).as_set() == Interval(0, oo, True, True)
assert (x >= 0).as_set() == Interval(0, oo)
assert (x < 0).as_set() == Interval(-oo, 0, True, True)
assert (x <= 0).as_set() == Interval(-oo, 0)
assert Eq(x, 0).as_set() == FiniteSet(0)
assert Ne(x, 0).as_set() == Interval(-oo, 0, True, True) + \
Interval(0, oo, True, True)
assert (x**2 >= 4).as_set() == Interval(-oo, -2) + Interval(2, oo)
@XFAIL
def test_multivariate_relational_as_set():
assert (x*y >= 0).as_set() == Interval(0, oo)*Interval(0, oo) + \
Interval(-oo, 0)*Interval(-oo, 0)
def test_Not():
assert Not(Equality(x, y)) == Unequality(x, y)
assert Not(Unequality(x, y)) == Equality(x, y)
assert Not(StrictGreaterThan(x, y)) == LessThan(x, y)
assert Not(StrictLessThan(x, y)) == GreaterThan(x, y)
assert Not(GreaterThan(x, y)) == StrictLessThan(x, y)
assert Not(LessThan(x, y)) == StrictGreaterThan(x, y)
def test_evaluate():
assert str(Eq(x, x, evaluate=False)) == 'Eq(x, x)'
assert Eq(x, x, evaluate=False).doit() == S.true
assert str(Ne(x, x, evaluate=False)) == 'Ne(x, x)'
assert Ne(x, x, evaluate=False).doit() == S.false
assert str(Ge(x, x, evaluate=False)) == 'x >= x'
assert str(Le(x, x, evaluate=False)) == 'x <= x'
assert str(Gt(x, x, evaluate=False)) == 'x > x'
assert str(Lt(x, x, evaluate=False)) == 'x < x'
def assert_all_ineq_raise_TypeError(a, b):
raises(TypeError, lambda: a > b)
raises(TypeError, lambda: a >= b)
raises(TypeError, lambda: a < b)
raises(TypeError, lambda: a <= b)
raises(TypeError, lambda: b > a)
raises(TypeError, lambda: b >= a)
raises(TypeError, lambda: b < a)
raises(TypeError, lambda: b <= a)
def assert_all_ineq_give_class_Inequality(a, b):
"""All inequality operations on `a` and `b` result in class Inequality."""
from sympy.core.relational import _Inequality as Inequality
assert isinstance(a > b, Inequality)
assert isinstance(a >= b, Inequality)
assert isinstance(a < b, Inequality)
assert isinstance(a <= b, Inequality)
assert isinstance(b > a, Inequality)
assert isinstance(b >= a, Inequality)
assert isinstance(b < a, Inequality)
assert isinstance(b <= a, Inequality)
def test_imaginary_compare_raises_TypeError():
# See issue #5724
assert_all_ineq_raise_TypeError(I, x)
def test_complex_compare_not_real():
# two cases which are not real
y = Symbol('y', imaginary=True)
z = Symbol('z', complex=True, real=False)
for w in (y, z):
assert_all_ineq_raise_TypeError(2, w)
# some cases which should remain un-evaluated
t = Symbol('t')
x = Symbol('x', real=True)
z = Symbol('z', complex=True)
for w in (x, z, t):
assert_all_ineq_give_class_Inequality(2, w)
def test_imaginary_and_inf_compare_raises_TypeError():
# See pull request #7835
y = Symbol('y', imaginary=True)
assert_all_ineq_raise_TypeError(oo, y)
assert_all_ineq_raise_TypeError(-oo, y)
def test_complex_pure_imag_not_ordered():
raises(TypeError, lambda: 2*I < 3*I)
# more generally
x = Symbol('x', real=True, nonzero=True)
y = Symbol('y', imaginary=True)
z = Symbol('z', complex=True)
assert_all_ineq_raise_TypeError(I, y)
t = I*x # an imaginary number, should raise errors
assert_all_ineq_raise_TypeError(2, t)
t = -I*y # a real number, so no errors
assert_all_ineq_give_class_Inequality(2, t)
t = I*z # unknown, should be unevaluated
assert_all_ineq_give_class_Inequality(2, t)
def test_x_minus_y_not_same_as_x_lt_y():
"""
A consequence of pull request #7792 is that `x - y < 0` and `x < y`
are not synonymous.
"""
x = I + 2
y = I + 3
raises(TypeError, lambda: x < y)
assert x - y < 0
ineq = Lt(x, y, evaluate=False)
raises(TypeError, lambda: ineq.doit())
assert ineq.lhs - ineq.rhs < 0
t = Symbol('t', imaginary=True)
x = 2 + t
y = 3 + t
ineq = Lt(x, y, evaluate=False)
raises(TypeError, lambda: ineq.doit())
assert ineq.lhs - ineq.rhs < 0
# this one should give error either way
x = I + 2
y = 2*I + 3
raises(TypeError, lambda: x < y)
raises(TypeError, lambda: x - y < 0)
def test_nan_equality_exceptions():
# See issue #7774
import random
assert Equality(nan, nan) is S.false
assert Unequality(nan, nan) is S.true
# See issue #7773
A = (x, S(0), S(1)/3, pi, oo, -oo)
assert Equality(nan, random.choice(A)) is S.false
assert Equality(random.choice(A), nan) is S.false
assert Unequality(nan, random.choice(A)) is S.true
assert Unequality(random.choice(A), nan) is S.true
def test_nan_inequality_raise_errors():
# See discussion in pull request #7776. We test inequalities with
# a set including examples of various classes.
for q in (x, S(0), S(10), S(1)/3, pi, S(1.3), oo, -oo, nan):
assert_all_ineq_raise_TypeError(q, nan)
def test_nan_complex_inequalities():
# Comparisons of NaN with non-real raise errors; we're not too
# fussy whether it's the NaN error or the complex error.
for r in (I, zoo, Symbol('z', imaginary=True)):
assert_all_ineq_raise_TypeError(r, nan)
def test_complex_infinity_inequalities():
raises(TypeError, lambda: zoo > 0)
raises(TypeError, lambda: zoo >= 0)
raises(TypeError, lambda: zoo < 0)
raises(TypeError, lambda: zoo <= 0)
def test_inequalities_symbol_name_same():
"""Using the operator and functional forms should give same results."""
# We test all combinations from a set
# FIXME: could replace with random selection after test passes
A = (x, y, S(0), S(1)/3, pi, oo, -oo)
for a in A:
for b in A:
assert Gt(a, b) == (a > b)
assert Lt(a, b) == (a < b)
assert Ge(a, b) == (a >= b)
assert Le(a, b) == (a <= b)
for b in (y, S(0), S(1)/3, pi, oo, -oo):
assert Gt(x, b, evaluate=False) == (x > b)
assert Lt(x, b, evaluate=False) == (x < b)
assert Ge(x, b, evaluate=False) == (x >= b)
assert Le(x, b, evaluate=False) == (x <= b)
for b in (y, S(0), S(1)/3, pi, oo, -oo):
assert Gt(b, x, evaluate=False) == (b > x)
assert Lt(b, x, evaluate=False) == (b < x)
assert Ge(b, x, evaluate=False) == (b >= x)
assert Le(b, x, evaluate=False) == (b <= x)
def test_inequalities_symbol_name_same_complex():
"""Using the operator and functional forms should give same results.
With complex non-real numbers, both should raise errors.
"""
# FIXME: could replace with random selection after test passes
for a in (x, S(0), S(1)/3, pi, oo):
raises(TypeError, lambda: Gt(a, I))
raises(TypeError, lambda: a > I)
raises(TypeError, lambda: Lt(a, I))
raises(TypeError, lambda: a < I)
raises(TypeError, lambda: Ge(a, I))
raises(TypeError, lambda: a >= I)
raises(TypeError, lambda: Le(a, I))
raises(TypeError, lambda: a <= I)
def test_inequalities_cant_sympify_other():
# see issue 7833
from operator import gt, lt, ge, le
bar = "foo"
for a in (x, S(0), S(1)/3, pi, I, zoo, oo, -oo, nan):
for op in (lt, gt, le, ge):
raises(TypeError, lambda: op(a, bar))
def test_ineq_avoid_wild_symbol_flip():
# see issue #7951, we try to avoid this internally, e.g., by using
# __lt__ instead of "<".
from sympy.core.symbol import Wild
p = symbols('p', cls=Wild)
# x > p might flip, but Gt should not:
assert Gt(x, p) == Gt(x, p, evaluate=False)
# Previously failed as 'p > x':
e = Lt(x, y).subs({y: p})
assert e == Lt(x, p, evaluate=False)
# Previously failed as 'p <= x':
e = Ge(x, p).doit()
assert e == Ge(x, p, evaluate=False)
def test_issue_8245():
a = S("6506833320952669167898688709329/5070602400912917605986812821504")
q = a.n(10)
assert (a == q) is True
assert (a != q) is False
assert (a > q) == False
assert (a < q) == False
assert (a >= q) == True
assert (a <= q) == True
a = sqrt(2)
r = Rational(str(a.n(30)))
assert (r == a) is False
assert (r != a) is True
assert (r > a) == True
assert (r < a) == False
assert (r >= a) == True
assert (r <= a) == False
a = sqrt(2)
r = Rational(str(a.n(29)))
assert (r == a) is False
assert (r != a) is True
assert (r > a) == False
assert (r < a) == True
assert (r >= a) == False
assert (r <= a) == True
def test_issue_8449():
p = Symbol('p', nonnegative=True)
assert Lt(-oo, p)
assert Ge(-oo, p) is S.false
assert Gt(oo, -p)
assert Le(oo, -p) is S.false
def test_simplify():
assert simplify(x*(y + 1) - x*y - x + 1 < x) == (x > 1)
assert simplify(S(1) < -x) == (x < -1)
def test_equals():
w, x, y, z = symbols('w:z')
f = Function('f')
assert Eq(x, 1).equals(Eq(x*(y + 1) - x*y - x + 1, x))
assert Eq(x, y).equals(x < y, True) == False
assert Eq(x, f(1)).equals(Eq(x, f(2)), True) == f(1) - f(2)
assert Eq(f(1), y).equals(Eq(f(2), y), True) == f(1) - f(2)
assert Eq(x, f(1)).equals(Eq(f(2), x), True) == f(1) - f(2)
assert Eq(f(1), x).equals(Eq(x, f(2)), True) == f(1) - f(2)
assert Eq(w, x).equals(Eq(y, z), True) == False
assert Eq(f(1), f(2)).equals(Eq(f(3), f(4)), True) == f(1) - f(3)
assert (x < y).equals(y > x, True) == True
assert (x < y).equals(y >= x, True) == False
assert (x < y).equals(z < y, True) == False
assert (x < y).equals(x < z, True) == False
assert (x < f(1)).equals(x < f(2), True) == f(1) - f(2)
assert (f(1) < x).equals(f(2) < x, True) == f(1) - f(2)
def test_reversed():
assert (x < y).reversed == (y > x)
assert (x <= y).reversed == (y >= x)
assert Eq(x, y, evaluate=False).reversed == Eq(y, x, evaluate=False)
assert Ne(x, y, evaluate=False).reversed == Ne(y, x, evaluate=False)
assert (x >= y).reversed == (y <= x)
assert (x > y).reversed == (y < x)
def test_canonical():
one = S(1)
def unchanged(v):
c = v.canonical
return v.is_Relational and c.is_Relational and v == c
def isreversed(v):
return v.canonical == v.reversed
assert unchanged(x < one)
assert unchanged(x <= one)
assert isreversed(Eq(one, x, evaluate=False))
assert unchanged(Eq(x, one, evaluate=False))
assert isreversed(Ne(one, x, evaluate=False))
assert unchanged(Ne(x, one, evaluate=False))
assert unchanged(x >= one)
assert unchanged(x > one)
assert unchanged(x < y)
assert unchanged(x <= y)
assert isreversed(Eq(y, x, evaluate=False))
assert unchanged(Eq(x, y, evaluate=False))
assert isreversed(Ne(y, x, evaluate=False))
assert unchanged(Ne(x, y, evaluate=False))
assert isreversed(x >= y)
assert isreversed(x > y)
assert (-x < 1).canonical == (x > -1)
assert isreversed(-x > y)
@XFAIL
def test_issue_8444():
x = symbols('x', real=True)
assert (x <= oo) == (x >= -oo) == True
x = symbols('x')
assert x >= floor(x)
assert (x < floor(x)) == False
assert Gt(x, floor(x)) == Gt(x, floor(x), evaluate=False)
assert Ge(x, floor(x)) == Ge(x, floor(x), evaluate=False)
assert x <= ceiling(x)
assert (x > ceiling(x)) == False
assert Lt(x, ceiling(x)) == Lt(x, ceiling(x), evaluate=False)
assert Le(x, ceiling(x)) == Le(x, ceiling(x), evaluate=False)
i = symbols('i', integer=True)
assert (i > floor(i)) == False
assert (i < ceiling(i)) == False
|
from flask import Flask, Response
from camera import Camera
import cv2
app = Flask(__name__)
camera = Camera().start()
def gen(camera):
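# Endless multipart/x-mixed-replace generator: each yielded chunk is a single
# JPEG-encoded frame wrapped in the '--frame' boundary, which browsers render
# as a Motion-JPEG stream.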
while True:
frame = camera.read()
_, jpeg = cv2.imencode('.jpg', frame)
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')
@app.route('/stream')
def stream():
return Response(gen(camera),
mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=False, use_reloader=False)
|
from glob import glob
from setuptools import setup
from pybind11.setup_helpers import Pybind11Extension
ext_modules = [
Pybind11Extension(
"PFlib",
# sorted(glob("*.cpp")), # Sort source files for reproducibility
["particle_filter.cpp",],
extra_compile_args=['-ggdb',],  # debug symbols (swig_opts is ignored when no SWIG sources are present)
include_dirs=['include',]
),
]
setup(
name="PFlib",
# extra_compile_args=['-O0','-Wall','-g'],
# extra_compile_args=['-Iinclude'],
ext_modules=ext_modules,
)
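# Typical local build/usage sketch (illustrative only):
#   pip install .                # builds the PFlib extension from particle_filter.cpp
#   python -c 'import PFlib'     # the compiled module is then importable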
|
from sklearn import svm, cluster
from PIL import Image, ImageDraw
import os
import sys
import random
def load_images(dirname):
images = []
for image_name in os.listdir(dirname):
if image_name.startswith('.'):
continue
image = Image.open(dirname + '/' + image_name).convert('1')
x, y = image.size
image = image.resize((x, 280), Image.ANTIALIAS)
data = [0 if pixel == 0 else 1 for pixel in image.getdata()]
images.append(data)
return images
min_len = 10000000
def normalize(X):
global min_len
min_len = min(min_len, min(len(x) for x in X))
return [x[:min_len] for x in X]
def crossvalidate(edges, nonedges):
random.shuffle(edges)
random.shuffle(nonedges)
train_edge_len, train_nonedge_len = len(edges) * 7 // 10, len(nonedges) * 7 // 10
cross_edge_len, cross_nonedge_len = len(edges) - train_edge_len, len(nonedges) - train_nonedge_len
X_train = normalize(nonedges[:train_nonedge_len] +
edges[:train_edge_len])
y_train = [0] * train_nonedge_len + [1] * train_edge_len
X_cross = normalize(nonedges[train_nonedge_len:] +
edges[train_edge_len:])
y_cross = [0] * cross_nonedge_len + [1] * cross_edge_len
clf = svm.SVC(gamma=.001, C=100.)
clf.fit(X_train, y_train)
print("prediction: {}".format(list(clf.predict(X_cross))))
print("actuallity: {}".format(y_cross))
print(clf.score(X_cross, y_cross))
def get_column(img, i):
w, h = img.size
column = []
for j in range(h):
column.append(0 if img.getpixel((i, j)) == 0 else 1)
return column
def search_picture(clf, image_name):
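# Sliding-window edge detection: each 25-column-wide window of the
# height-normalised page is flattened and scored by the SVM; window positions
# predicted as panel edges are clustered (DBSCAN merges neighbouring hits,
# KMeans picks 3 separator candidates) and converted into separator x-positions.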
image = Image.open(image_name).convert('1')
x, y = image.size
image = image.resize((x, 280), Image.ANTIALIAS)
w, h = image.size
columns = [get_column(image, i) for i in range(25)]
datas = []
for i in range(25, w):
columns = columns[1:] + [get_column(image, i)]
data = [columns[i][j] for j in range(len(columns[0])) for i in range(len(columns))]
datas.append(data)
datas = normalize(datas)
matches = [[i] for i, m in enumerate(clf.predict(datas)) if m == 1]
if len(matches) == 0:
return [], matches
clst = cluster.DBSCAN(eps=20, min_samples=1)
clst.fit(matches)
trimmed = [idx for idx in clst.components_ if idx > w // 6 and idx < w * 5 // 6]
clst = cluster.KMeans(3, init='k-means++')
clst.fit(trimmed)
seps = list(sorted([int(v[0]) + 25//2 for v in clst.cluster_centers_]))
final_seps = []
for start, end in zip(seps, seps[1:]):
if (end - start) > w // 6:
final_seps.append(start)
final_seps.append(seps[-1])
return final_seps, matches
def train(edges, nonedges):
clf = svm.SVC(gamma=.001, C=100.)
X = normalize(nonedges + edges)
y = [0] * len(nonedges) + [1] * len(edges)
clf.fit(X, y)
return clf
def main(edge_dir, non_edge_dir):
edges = load_images(edge_dir)
nonedges = load_images(non_edge_dir)
crossvalidate(edges, nonedges)
clf = train(edges, nonedges)
for comic in os.listdir('test'):
print(comic)
panels, matches = search_picture(clf, 'test/' + comic)
print("\tpanels: {}".format(panels))
image = Image.open('test/' + comic).convert('RGBA')
draw = ImageDraw.Draw(image)
w, h = image.size
for match in matches:
match = match[0]
draw.line((match, 0) + (match, h), fill=(0,0,255,0))
for sep in panels:
draw.line((sep, 0) + (sep, h), fill=(255,0,0), width=3)
image.show()
return clf
if __name__ == '__main__':
if len(sys.argv) != 3:
print('Usage: {} <edges-dir> <non-edges-dir>'.format(sys.argv[0]))
sys.exit(1)
edge_dir = sys.argv[1]
non_edge_dir = sys.argv[2]
main(edge_dir, non_edge_dir)
|
import logging
import os
import re
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import Any, Tuple, Optional
import pandas as pd
from python import TOPIC_ID, SUBTOPIC, DOCUMENT_NUMBER, DOCUMENT_ID, SENTENCE_IDX, TOKEN_IDX, TOKEN_IDX_TO, \
TOKEN_IDX_FROM, TOKEN, MENTION_ID, EVENT, MENTION_TYPE, DESCRIPTION, MENTION_TYPES_ACTION
logger = logging.getLogger()
def read_xml(xml_path) -> Tuple[Any, Any, Any, Any, Any]:
tree = ET.parse(xml_path)
# 1: read document info
root = tree.getroot()
assert root.tag == "Document"
doc_filename = root.attrib["doc_name"]
doc_id = root.attrib["doc_id"]
m = re.match(r"(?P<topic_id>\d+)_(?P<document_number>\d+)(?P<subtopic>\w+)\.xml", doc_filename)
topic_id = m.group("topic_id")
subtopic = m.group("subtopic")
document_number = int(m.group("document_number"))
documents_index = pd.MultiIndex.from_tuples([(topic_id, subtopic, doc_id)],
names=[TOPIC_ID, SUBTOPIC, DOCUMENT_ID])
documents = pd.DataFrame({DOCUMENT_ID: pd.Series(doc_id, index=documents_index),
DOCUMENT_NUMBER: pd.Series(document_number, index=documents_index)})
# 2: read document content
contents_rows = []
contents_index = []
for token_elmt in root.iter("token"):
# index content
sentence_idx = int(token_elmt.attrib["sentence"])
token_idx = int(token_elmt.attrib["number"])
contents_index.append((doc_id, sentence_idx, token_idx))
# content
token = token_elmt.text
contents_rows.append({TOKEN: token})
contents_index = pd.MultiIndex.from_tuples(contents_index, names=[DOCUMENT_ID, SENTENCE_IDX, TOKEN_IDX])
contents = pd.DataFrame(contents_rows, index=contents_index)
# 3: read markables / mentions and entity/event descriptions
mentions_rows = []
mentions_index = []
entities_events = []
for markable in root.find("Markables").getchildren():
# Don't know what this is, skip it
if markable.tag == "UNKNOWN_INSTANCE_TAG":
continue
mention_id = int(markable.attrib["m_id"])
# there are markables without spans, these are descriptions of entities / events which we want to keep
if "TAG_DESCRIPTOR" in markable.attrib.keys():
if "instance_id" in markable.attrib.keys():
entities_events.append({
EVENT: markable.attrib["instance_id"],
DESCRIPTION: markable.attrib["TAG_DESCRIPTOR"]
})
continue
token_ids = [int(anchor.attrib["t_id"]) for anchor in markable.iter("token_anchor")]
token_ids_from, token_ids_to = min(token_ids), max(token_ids)
# the token_ids are cumulative token indexes, remove their cumulative nature
token_indexes = contents.index.get_level_values(TOKEN_IDX).values
token_idx_from = token_indexes[
token_ids_from - 1] # -1 because token_ids start at 1, so we need to access index 0 in the dataframe to find t_id 1
token_idx_to = token_indexes[
token_ids_to - 1] + 1 # additionally +1 here because we want mention spans represented as intervals [from, to[
sentence_idx = contents.index.get_level_values(SENTENCE_IDX).values[token_ids_from - 1]
# resolve non-contiguous mentions
is_non_contiguous_mention = len(token_ids) < token_idx_to - token_idx_from
if is_non_contiguous_mention:
logger.info("Converted non-contiguous mention to contiguous mention.")
mentions_index.append((doc_id, mention_id))
mentions_rows.append({SENTENCE_IDX: sentence_idx,
TOKEN_IDX_FROM: token_idx_from,
TOKEN_IDX_TO: token_idx_to,
MENTION_TYPE: markable.tag})
mentions_index = pd.MultiIndex.from_tuples(mentions_index, names=[DOCUMENT_ID, MENTION_ID])
mentions = pd.DataFrame(mentions_rows, index=mentions_index)
entities_events = pd.DataFrame(entities_events).set_index(EVENT)
# 4. read relations (clusters)
clusters_rows = []
for relation in root.find("Relations").getchildren():
tags_of_interest = ["CROSS_DOC_COREF", "INTRA_DOC_COREF"]
if not relation.tag in tags_of_interest:
logger.info("Unexpected tag " + relation.tag)
raise NotImplementedError
# There are relations with tags INTRA_DOC_COREF and CROSS_DOC_COREF. The cross-doc ones have a "note" attribute.
if "note" in relation.attrib:
# this is the case for CROSS_DOC_COREF tags
relation_id = relation.attrib["note"]
else:
# this is the case for INTRA_DOC_COREF tags
relation_id = doc_id + "_" + relation.attrib["r_id"]
for mention in relation.iter("source"):
mention_id = int(mention.attrib["m_id"])
clusters_rows.append({EVENT: relation_id, DOCUMENT_ID: doc_id, MENTION_ID: mention_id})
clusters = pd.DataFrame(clusters_rows)
# 5. create relations for singletons
# In ECB plus, there are ACTION_OCCURRENCE markables which are not assigned to a relation. These are singletons. We
# add one entry for each singleton to `clusters` to ensure consistency. Note that the opposite also exists:
# singleton mentions which are marked as participating in a cross-doc coref relation, but there is no second
# mention for this relation.
if clusters.empty:
singletons = mentions.index.to_frame().reset_index(drop=True)
else:
# This can most likely be done in a nicer way using some index difference...
outer = pd.merge(mentions, clusters, left_index=True, right_on=[DOCUMENT_ID, MENTION_ID], how="outer")
singletons = outer.loc[outer[EVENT].isna(), [DOCUMENT_ID, MENTION_ID]]
singletons[EVENT] = "SINGLETON_" + singletons.astype(str).apply("_".join, axis=1)
clusters = clusters.append(singletons, sort=False).reset_index(drop=True)
return documents, contents, mentions, clusters, entities_events
def read_split_data(root: Path, sentence_filter_csv: Optional[Path]):
documents = []
contents = []
mentions = []
clusters = []
entities_events = []
# enumerate files
for root, dirs, files in os.walk(str(root.absolute())):
for file in files:
path = os.path.abspath(os.path.join(root, file))
f_documents, f_contents, f_mentions, f_clusters, f_entities_events = read_xml(path)
documents.append(f_documents)
contents.append(f_contents)
mentions.append(f_mentions)
clusters.append(f_clusters)
entities_events.append(f_entities_events)
documents = pd.concat(documents).sort_index()
contents = pd.concat(contents).sort_index()
mentions = pd.concat(mentions).sort_index()
clusters = pd.concat(clusters, sort=False)
entities_events = pd.concat(entities_events).sort_index()
# assert that every mention participates only in one cluster -> meaning we can just add an 'EVENT' column to each mention
assert clusters.duplicated(subset=[DOCUMENT_ID, MENTION_ID]).value_counts().get(True, 0) == 0
clusters = clusters.set_index([DOCUMENT_ID, MENTION_ID])
mentions = pd.merge(mentions, clusters, left_index=True, right_index=True).sort_index()
# read file which tells us from which sentences we should keep event mentions
if sentence_filter_csv is not None:
sent_filter = pd.read_csv(sentence_filter_csv)
doc_number_and_subtopic = sent_filter["File"].str.split("ecb", expand=True)
doc_number_and_subtopic.columns = [DOCUMENT_NUMBER, SUBTOPIC]
doc_number_and_subtopic[DOCUMENT_NUMBER] = doc_number_and_subtopic[DOCUMENT_NUMBER].astype(int)
doc_number_and_subtopic[SUBTOPIC].replace({"plus": "ecbplus", "": "ecb"}, inplace=True)
sent_filter = pd.concat([sent_filter.drop(columns="File"), doc_number_and_subtopic], axis=1)
sent_filter.rename(columns={"Topic": TOPIC_ID, "Sentence Number": SENTENCE_IDX}, inplace=True)
sent_filter[TOPIC_ID] = sent_filter[TOPIC_ID].astype(str)
sent_filter = sent_filter[[TOPIC_ID, SUBTOPIC, DOCUMENT_NUMBER, SENTENCE_IDX]]
# the sentence filter file applies to all splits, remove those topics that we don't have in the split we're loading
topics_in_split = documents.index.get_level_values(TOPIC_ID).unique()
sent_filter = sent_filter.loc[sent_filter[TOPIC_ID].isin(topics_in_split)].copy()
# obtain doc-id from topic+subtopic+document number
documents_with_doc_number_in_index = documents.set_index(DOCUMENT_NUMBER, append=True).reset_index(level=DOCUMENT_ID, drop=True).sort_index()
sent_filter[DOCUMENT_ID] = sent_filter[[TOPIC_ID, SUBTOPIC, DOCUMENT_NUMBER]].apply(lambda row: documents_with_doc_number_in_index[DOCUMENT_ID].loc[tuple(row.values)], axis=1)
all_mentions_to_keep = []
for doc_id, df in mentions.groupby(DOCUMENT_ID):
sentences_to_keep = sent_filter.loc[sent_filter[DOCUMENT_ID] == doc_id]
# we only remove action phrases and leave the other mentions in place, so that we can potentially mask them for
# analysis, see python.handwritten_baseline.pipeline.data.processing.masking.MentionMaskingStage
is_official_evaluation_sentence = df[SENTENCE_IDX].isin(sentences_to_keep[SENTENCE_IDX])
is_action_mention = df[MENTION_TYPE].isin(MENTION_TYPES_ACTION)
mentions_to_keep = df.loc[is_official_evaluation_sentence | (~is_action_mention)]
all_mentions_to_keep.append(mentions_to_keep)
mentions = pd.concat(all_mentions_to_keep).sort_index()
return documents, contents, mentions, entities_events
|
"""
Django settings for simplesocial project.
Generated by 'django-admin startproject' using Django 2.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%2^#bfjd61932k^l^fz4ztt+e*9ahjda(7w3xaa0d+^@a&=-6*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap4',
'accounts',
'groups',
'posts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'simplesocial.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR,],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'simplesocial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'),]
LOGIN_REDIRECT_URL = 'test'
LOGOUT_REDIRECT_URL = 'thanks'
|
#!/usr/bin/env python
#coding: utf-8
import sys
from common import reverse_items
if len(sys.argv) != 3:
print("Reverse key and value of all pairs")
print(("Usage: ", sys.argv[0], "[input] [output]"))
exit(1)
reverse_items(sys.argv[1], sys.argv[2])
|
# Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import textwrap
applianceSelf = os.environ['TOIL_APPLIANCE_SELF']
sdistName = os.environ['_TOIL_SDIST_NAME']
dependencies = ' '.join(['libffi-dev', # For client side encryption for extras with PyNACL
'python3.6',
'python3.6-dev',
'python-dev', # For installing Python packages with native code
'python-pip', # Bootstrap pip, but needs upgrading, see below
'python3-pip',
'libcurl4-openssl-dev',
'libssl-dev',
'wget',
'curl',
'openssh-server',
'mesos=1.0.1-2.0.94.ubuntu1604',
"nodejs", # CWL support for javascript expressions
'rsync',
'screen',
'build-essential', # We need a build environment to build Singularity 3.
'uuid-dev',
'libgpgme11-dev',
'libseccomp-dev',
'pkg-config',
'squashfs-tools',
'cryptsetup',
'git'])
def heredoc(s):
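# Dedent the template, substitute module-level names (applianceSelf, sdistName,
# dependencies, motd, ...) via str.format, and drop a single leading newline so
# heredocs can start on the line after the opening quotes.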
s = textwrap.dedent(s).format(**globals())
return s[1:] if s.startswith('\n') else s
motd = heredoc('''
This is the Toil appliance. You can run your Toil script directly on the appliance.
Run toil <workflow>.py --help to see all options for running your workflow.
For more information see http://toil.readthedocs.io/en/latest/
Copyright (C) 2015-2018 Regents of the University of California
Version: {applianceSelf}
''')
# Prepare motd to be echoed in the Dockerfile using a RUN statement that uses bash's printf
motd = ''.join(l + '\\n\\\n' for l in motd.splitlines())
print(heredoc('''
FROM ubuntu:16.04
RUN apt-get -y update --fix-missing && apt-get -y upgrade && apt-get -y install apt-transport-https ca-certificates software-properties-common && apt-get clean && rm -rf /var/lib/apt/lists/*
RUN echo "deb http://repos.mesosphere.io/ubuntu/ xenial main" \
> /etc/apt/sources.list.d/mesosphere.list \
&& apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF \
&& echo "deb http://deb.nodesource.com/node_6.x xenial main" \
> /etc/apt/sources.list.d/nodesource.list \
&& apt-key adv --keyserver keyserver.ubuntu.com --recv 68576280
RUN add-apt-repository -y ppa:deadsnakes/ppa
RUN apt-get -y update --fix-missing && \
DEBIAN_FRONTEND=noninteractive apt-get -y upgrade && \
DEBIAN_FRONTEND=noninteractive apt-get -y install {dependencies} && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
RUN wget https://dl.google.com/go/go1.13.3.linux-amd64.tar.gz && \
tar xvf go1.13.3.linux-amd64.tar.gz && \
mv go/bin/* /usr/bin/ && \
mv go /usr/local/
RUN mkdir -p $(go env GOPATH)/src/github.com/sylabs && \
cd $(go env GOPATH)/src/github.com/sylabs && \
git clone https://github.com/sylabs/singularity.git && \
cd singularity && \
git checkout v3.4.2 && \
./mconfig && \
cd ./builddir && \
make -j4 && \
make install
RUN mkdir /root/.ssh && \
chmod 700 /root/.ssh
ADD waitForKey.sh /usr/bin/waitForKey.sh
ADD customDockerInit.sh /usr/bin/customDockerInit.sh
RUN chmod 777 /usr/bin/waitForKey.sh && chmod 777 /usr/bin/customDockerInit.sh
# The stock pip is too old and can't install from sdist with extras
RUN pip install --upgrade pip==9.0.1
# Default setuptools is too old
RUN pip install --upgrade setuptools==36.5.0
# Include virtualenv, as it is still the recommended way to deploy pipelines
RUN pip install --upgrade virtualenv==15.0.3
# Install s3am (--never-download prevents silent upgrades to pip, wheel and setuptools)
RUN virtualenv --never-download /home/s3am \
&& /home/s3am/bin/pip install s3am==2.0 \
&& ln -s /home/s3am/bin/s3am /usr/local/bin/
# Install statically linked version of docker client
RUN curl https://download.docker.com/linux/static/stable/x86_64/docker-18.06.1-ce.tgz \
| tar -xvzf - --transform='s,[^/]*/,,g' -C /usr/local/bin/ \
&& chmod u+x /usr/local/bin/docker
# Fix for Mesos interface dependency missing on ubuntu
RUN pip install protobuf==3.0.0
# Fix for https://issues.apache.org/jira/browse/MESOS-3793
ENV MESOS_LAUNCHER=posix
# Fix for `screen` (https://github.com/BD2KGenomics/toil/pull/1386#issuecomment-267424561)
ENV TERM linux
# Run bash instead of sh inside of screen
ENV SHELL /bin/bash
RUN echo "defshell -bash" > ~/.screenrc
# An appliance may need to start more appliances, e.g. when the leader appliance launches the
# worker appliance on a worker node. To support this, we embed a self-reference into the image:
ENV TOIL_APPLIANCE_SELF {applianceSelf}
RUN mkdir /var/lib/toil
ENV TOIL_WORKDIR /var/lib/toil
# This component changes most frequently and keeping it last maximizes Docker cache hits.
COPY {sdistName} .
RUN pip install {sdistName}[all]
RUN rm {sdistName}
# We intentionally inherit the default ENTRYPOINT and CMD from the base image, to the effect
# that the running appliance just gives you a shell. To start the Mesos master or slave
    # daemons, the user should override the entrypoint via --entrypoint.
RUN echo '[ ! -z "$TERM" -a -r /etc/motd ] && cat /etc/motd' >> /etc/bash.bashrc \
&& printf '{motd}' > /etc/motd
'''))
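# A minimal usage sketch (assumptions: the script file name and image tag below are
# illustrative, not the project's official build invocation; only the two environment
# variables read at the top of this script are required):
#
#   TOIL_APPLIANCE_SELF=quay.io/ucsc_cgl/toil:latest \
#   _TOIL_SDIST_NAME=toil-x.y.z.tar.gz \
#   python Dockerfile.py > Dockerfile
#   docker build -t "$TOIL_APPLIANCE_SELF" .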
|
# mypy: allow-untyped-defs
from unittest import mock
import pytest
from tools.ci.tc import decision
@pytest.mark.parametrize("run_jobs,tasks,expected", [
([], {"task-no-schedule-if": {}}, ["task-no-schedule-if"]),
([], {"task-schedule-if-no-run-job": {"schedule-if": {}}}, []),
(["job"],
{"job-present": {"schedule-if": {"run-job": ["other-job", "job"]}}},
["job-present"]),
(["job"], {"job-missing": {"schedule-if": {"run-job": ["other-job"]}}}, []),
(["all"], {"job-all": {"schedule-if": {"run-job": ["other-job"]}}}, ["job-all"]),
(["job"],
{"job-1": {"schedule-if": {"run-job": ["job"]}},
"job-2": {"schedule-if": {"run-job": ["other-job"]}}},
["job-1"]),
])
def test_filter_schedule_if(run_jobs, tasks, expected):
with mock.patch("tools.ci.tc.decision.get_run_jobs",
return_value=run_jobs) as get_run_jobs:
assert (decision.filter_schedule_if({}, tasks) ==
{name: tasks[name] for name in expected})
        assert get_run_jobs.call_count in (0, 1)
@pytest.mark.parametrize("msg,expected", [
("Some initial line\n\ntc-jobs:foo,bar", {"foo", "bar"}),
("Some initial line\n\ntc-jobs:foo, bar", {"foo", "bar"}),
("tc-jobs:foo, bar \nbaz", {"foo", "bar"}),
("tc-jobs:all", {"all"}),
("", set()),
("tc-jobs:foo\ntc-jobs:bar", {"foo"})])
@pytest.mark.parametrize("event", [
{"commits": [{"message": "<message>"}]},
{"pull_request": {"body": "<message>"}}
])
def test_extra_jobs_pr(msg, expected, event):
def sub(obj):
"""Copy obj, except if it's a string with the value <message>
replace it with the value of the msg argument"""
if isinstance(obj, dict):
return {key: sub(value) for (key, value) in obj.items()}
elif isinstance(obj, list):
return [sub(value) for value in obj]
elif obj == "<message>":
return msg
return obj
event = sub(event)
assert decision.get_extra_jobs(event) == expected
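# Grounded in the cases above: get_extra_jobs() looks for a "tc-jobs:<name>[,<name>...]"
# line in the commit message or PR body, tolerates spaces after commas, treats "all" as
# a wildcard, and only honours the first matching line.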
|
"""
Main inputs:
(Change for all fields)
"""
eazypath = '/data2/ken/photoz/eazy-photoz/src/eazy '
working_folder = '/data2/ken/EN1_pani'
photometry_catalog = 'en1_phot_with_zspec.fits'
photometry_format = 'fits'
filter_file = 'EN1_filters.res'
translate_file = 'EN1.translate'
zspec_col = 'z_spec'
flux_col = 'flux'
fluxerr_col = 'fluxerr'
do_zp = False
do_zp_tests = False
do_subcats = False
do_full = False
do_stellar = False
do_hb = True
do_merge = True
"""
Training parameters
"""
Ncrossval = 1
test_fraction = 0.2
process_outliers = True
correct_extinction = True
"""
Fitting Parameters
(Change only when needed)
"""
# Templates: any combination of 'eazy', 'swire', 'atlas', 'cosmos'
templates = ['eazy', 'atlas', 'cosmos']
fitting_mode = ['a', '1', '1']
defaults = ['defaults/zphot.eazy',
'defaults/zphot.atlas',
'defaults/zphot.cosmos']
#'defaults/zphot.eazy',
#'defaults/zphot.atlas',
#'defaults/zphot.swire']
stellar_params = 'defaults/zphot.pickles'
additional_errors = [0.0, 0.0, 0.0]
template_error_norm = [1., 1., 1.]
template_error_file = ''
lambda_fit_max = [5., 30., 30.]
"""
Combination Parameters
"""
include_prior = True
fbad_prior = 'mag' # 'flat', 'vol' or 'mag'
#prior_parameter_path = 'en1_i_prior_coeff.npz'
prior_fname = 'pani_mag'
prior_colname = 'pani_mag'
alpha_colname = 'pani_mag'
"""
System Parameters
(Specific system only - fixed after installation)
"""
block_size = 1e4
ncpus = 10
|
from bfstpw.models import ForumThread, ForumPost
from datetime import datetime
from django.conf import settings
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.db.models import Count
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.template import Context
import django.utils.timezone
from markdown import markdown
def threadlist(request):
c = Context({"threadlist" :
[{"id":t.id
,"name":t.thread_title
,"poster":t.getOriginalPost().poster
,"replycount":t.postcount
,"lastpage":(t.postcount / getattr(settings, 'BFSTPW_MAX_POSTS_PER_PAGE', 20))+1
,"date":t.mostrecent
,"lastposter":t.getLatestPost().poster
} for t in
ForumThread.objects.sortByLastPost().annotate(postcount=Count('forumpost'))]})
return render(request, 'bfstpw/threadlist.html', c)
def thread(request, thread_id, message=''):
current_thread = get_object_or_404(ForumThread, id=thread_id)
max_posts_per_page = getattr(settings, 'BFSTPW_MAX_POSTS_PER_PAGE', 20)
paginator = Paginator(current_thread.forumpost_set.order_by('date_posted'), max_posts_per_page)
c = Context(
{"threadlist" :
[{"id":t.id
,"name":t.thread_title
} for t in ForumThread.objects.sortByLastPost()],
"thread" : current_thread,
"posts" : paginator.page(request.GET.get('page',1)),
"pages" : paginator.page_range,
"message" : message
})
return render(request, 'bfstpw/thread.html', c)
def post(request, thread_id):
t = get_object_or_404(ForumThread, id=thread_id)
posts = t.forumpost_set
"""
# TODO: don't let users post too quickly
session = request.session
current_time = time()
if (session.get('lastposttime',0) + 10) < current_time:
message_html = markdown(request.POST['message'], safe_mode='escape')
posts.create(poster=request.POST['name'],message_body=message_html,date_posted=datetime.now())
msg = ''
session['lastposttime'] = current_time
else:
msg = "Error: you must wait 10 seconds before posting"
"""
message_html = markdown(request.POST['message'], safe_mode='escape')
posts.create(poster=request.POST['name'], message_body=message_html,
date_posted=django.utils.timezone.now())
    pagenum = (posts.count() // getattr(settings, 'BFSTPW_MAX_POSTS_PER_PAGE', 20))+1
return HttpResponseRedirect(reverse('bfstpw-thread', args=(t.id,))+'?page=%d' % pagenum)
def newthreadmake(request):
t = ForumThread(thread_title=request.POST['threadname'])
t.save()
message_html = markdown(request.POST['message'], safe_mode='escape')
t.forumpost_set.create(poster=request.POST['name'],
message_body=message_html,
date_posted=django.utils.timezone.now())
return HttpResponseRedirect(reverse('bfstpw-thread', args=(t.id,)))
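# A minimal urls.py sketch matching the view names reversed above (assumption: only
# 'bfstpw-thread' is confirmed by this module; the other route names and patterns are
# illustrative, using the old-style url() syntax that matches django.core.urlresolvers):
#
#   from django.conf.urls import url
#   from bfstpw import views
#
#   urlpatterns = [
#       url(r'^$', views.threadlist, name='bfstpw-threadlist'),
#       url(r'^thread/(?P<thread_id>\d+)/$', views.thread, name='bfstpw-thread'),
#       url(r'^thread/(?P<thread_id>\d+)/post/$', views.post, name='bfstpw-post'),
#       url(r'^newthread/$', views.newthreadmake, name='bfstpw-newthread'),
#   ]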
|
from collections import OrderedDict
import torch.nn as nn
from lib.utils.util import *
from timm.models.efficientnet_blocks import *
# ChildNet Builder definition.
class ChildNetBuilder:
def __init__(
self,
channel_multiplier=1.0,
channel_divisor=8,
channel_min=None,
output_stride=32,
pad_type='',
act_layer=None,
se_kwargs=None,
norm_layer=nn.BatchNorm2d,
norm_kwargs=None,
drop_path_rate=0.,
feature_location='',
verbose=False,
logger=None):
self.channel_multiplier = channel_multiplier
self.channel_divisor = channel_divisor
self.channel_min = channel_min
self.output_stride = output_stride
self.pad_type = pad_type
self.act_layer = act_layer
self.se_kwargs = se_kwargs
self.norm_layer = norm_layer
self.norm_kwargs = norm_kwargs
self.drop_path_rate = drop_path_rate
self.feature_location = feature_location
assert feature_location in ('pre_pwl', 'post_exp', '')
self.verbose = verbose
self.in_chs = None
self.features = OrderedDict()
self.logger = logger
def _round_channels(self, chs):
return round_channels(
chs,
self.channel_multiplier,
self.channel_divisor,
self.channel_min)
def _make_block(self, ba, block_idx, block_count):
drop_path_rate = self.drop_path_rate * block_idx / block_count
bt = ba.pop('block_type')
ba['in_chs'] = self.in_chs
ba['out_chs'] = self._round_channels(ba['out_chs'])
if 'fake_in_chs' in ba and ba['fake_in_chs']:
ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs'])
ba['norm_layer'] = self.norm_layer
ba['norm_kwargs'] = self.norm_kwargs
ba['pad_type'] = self.pad_type
# block act fn overrides the model default
ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer
assert ba['act_layer'] is not None
if bt == 'ir':
ba['drop_path_rate'] = drop_path_rate
ba['se_kwargs'] = self.se_kwargs
if self.verbose:
self.logger.info(
' InvertedResidual {}, Args: {}'.format(
block_idx, str(ba)))
block = InvertedResidual(**ba)
elif bt == 'ds' or bt == 'dsa':
ba['drop_path_rate'] = drop_path_rate
ba['se_kwargs'] = self.se_kwargs
if self.verbose:
self.logger.info(
' DepthwiseSeparable {}, Args: {}'.format(
block_idx, str(ba)))
block = DepthwiseSeparableConv(**ba)
elif bt == 'cn':
if self.verbose:
self.logger.info(
' ConvBnAct {}, Args: {}'.format(
block_idx, str(ba)))
block = ConvBnAct(**ba)
else:
            assert False, 'Unknown block type (%s) while building model.' % bt
self.in_chs = ba['out_chs'] # update in_chs for arg of next block
return block
def __call__(self, in_chs, model_block_args):
""" Build the blocks
Args:
in_chs: Number of input-channels passed to first block
model_block_args: A list of lists, outer list defines stages, inner
list contains strings defining block configuration(s)
Return:
List of block stacks (each stack wrapped in nn.Sequential)
"""
if self.verbose:
self.logger.info(
'Building model trunk with %d stages...' %
len(model_block_args))
self.in_chs = in_chs
total_block_count = sum([len(x) for x in model_block_args])
total_block_idx = 0
current_stride = 2
current_dilation = 1
feature_idx = 0
stages = []
# outer list of block_args defines the stacks ('stages' by some
# conventions)
for stage_idx, stage_block_args in enumerate(model_block_args):
last_stack = stage_idx == (len(model_block_args) - 1)
if self.verbose:
self.logger.info('Stack: {}'.format(stage_idx))
assert isinstance(stage_block_args, list)
blocks = []
# each stack (stage) contains a list of block arguments
for block_idx, block_args in enumerate(stage_block_args):
last_block = block_idx == (len(stage_block_args) - 1)
extract_features = '' # No features extracted
if self.verbose:
self.logger.info(' Block: {}'.format(block_idx))
# Sort out stride, dilation, and feature extraction details
assert block_args['stride'] in (1, 2)
if block_idx >= 1:
# only the first block in any stack can have a stride > 1
block_args['stride'] = 1
do_extract = False
if self.feature_location == 'pre_pwl':
if last_block:
next_stage_idx = stage_idx + 1
if next_stage_idx >= len(model_block_args):
do_extract = True
else:
do_extract = model_block_args[next_stage_idx][0]['stride'] > 1
elif self.feature_location == 'post_exp':
if block_args['stride'] > 1 or (last_stack and last_block):
do_extract = True
if do_extract:
extract_features = self.feature_location
next_dilation = current_dilation
if block_args['stride'] > 1:
next_output_stride = current_stride * block_args['stride']
if next_output_stride > self.output_stride:
next_dilation = current_dilation * block_args['stride']
block_args['stride'] = 1
if self.verbose:
self.logger.info(
' Converting stride to dilation to maintain output_stride=={}'.format(
self.output_stride))
else:
current_stride = next_output_stride
block_args['dilation'] = current_dilation
if next_dilation != current_dilation:
current_dilation = next_dilation
# create the block
block = self._make_block(
block_args, total_block_idx, total_block_count)
blocks.append(block)
# stash feature module name and channel info for model feature
# extraction
if extract_features:
feature_module = block.feature_module(extract_features)
if feature_module:
feature_module = 'blocks.{}.{}.'.format(
stage_idx, block_idx) + feature_module
feature_channels = block.feature_channels(extract_features)
self.features[feature_idx] = dict(
name=feature_module,
num_chs=feature_channels
)
feature_idx += 1
# incr global block idx (across all stacks)
total_block_idx += 1
stages.append(nn.Sequential(*blocks))
return stages
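# A minimal usage sketch (assumptions: a torch/timm environment and block arguments
# already decoded into a per-stage list of dicts, e.g. with timm's decode_arch_def;
# the names below are illustrative only):
#
#   builder = ChildNetBuilder(channel_multiplier=1.0,
#                             act_layer=nn.ReLU,
#                             norm_layer=nn.BatchNorm2d)
#   stages = builder(in_chs=32, model_block_args=decoded_arch)
#   trunk = nn.Sequential(*stages)  # each stage is itself an nn.Sequential of blocks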
|