import tensorflow as tf
tf.compat.v1.enable_eager_execution()
# Weight Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('./saved_model')
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
tflite_quant_model = converter.convert()
with open('./mobilenet_v3_large_224_dm07_weight_quant.tflite', 'wb') as w:
w.write(tflite_quant_model)
print("Weight Quantization complete! - mobilenet_v3_large_224_dm07_weight_quant.tflite")
|
from typing import List
from . import AccessRequest, FileID, PartSpec, PartsGenerator
class NonCorrelatedSchemesGenerator(object):
def __init__(self, number: int, fraction: float) -> None:
self._number: int = number
self._fraction: float = fraction
self._parts_number: int = 2 ** number
@property
def number(self) -> int:
return self._number
@property
def fraction(self) -> float:
return self._fraction
def parts(self, index: int, total_bytes: int) -> List[PartSpec]:
scheme_parts_number = 2 ** (self._number - 1)
parts: List[PartSpec] = []
for i in range(scheme_parts_number):
# Insert 1 bit at index into binary representation of i
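# Example: i=0b10 with index=1 yields part_index=0b110 -- the bits of i
# are split around position `index` and a 1 is inserted there, so every
# part generated in this loop belongs to scheme `index`.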
part_index = (((i << 1 >> index) | 1) << index) | (i & ((1 << index) - 1))
containing_schemes = bin(part_index).count('1')
part_bytes = round(total_bytes * (
self._fraction ** containing_schemes
*
(1 - self._fraction) ** (self._number - containing_schemes)
))
parts.append((part_index, part_bytes))
return parts
def access_request(self, index: int, file: FileID, total_bytes: int) -> AccessRequest:
return AccessRequest(file, self.parts(index, total_bytes))
class WithIndex(PartsGenerator):
def __init__(self, generator: 'NonCorrelatedSchemesGenerator', index: int) -> None:
self._generator: NonCorrelatedSchemesGenerator = generator
self._index: int = index
def parts(self, total_bytes: int) -> List[PartSpec]:
return self._generator.parts(self._index, total_bytes)
def access_request(self, file: FileID, total_bytes: int) -> AccessRequest:
return self._generator.access_request(self._index, file, total_bytes)
def with_index(self, index: int) -> WithIndex:
return self.WithIndex(self, index)
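# Illustrative usage (assuming PartSpec is a (part_index, part_bytes)
# tuple, as parts() above suggests):
#   gen = NonCorrelatedSchemesGenerator(number=2, fraction=0.5)
#   gen.parts(0, 1000)             # -> [(1, 250), (3, 250)]
#   gen.with_index(0).parts(1000)  # same result via the PartsGenerator API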
|
from __future__ import absolute_import
import six
from sentry.api.serializers import Serializer, register
from sentry.models import Monitor
@register(Monitor)
class MonitorSerializer(Serializer):
def serialize(self, obj, attrs, user):
return {
'id': six.text_type(obj.guid),
'status': obj.get_status_display(),
'name': obj.name,
'dateCreated': obj.date_added,
}
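# With @register(Monitor), Monitor instances can be passed to sentry's
# generic serialize() helper (sentry.api.serializers.serialize), which
# dispatches to this class and returns the dict built above (illustrative).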
|
import requests
headers = {
'X-Api-Key': '123456789',
}
params = {
'test': '2',
'limit': '100',
'w': '4',
}
response = requests.get('http://localhost:28139/synthetics/api/v3/monitors', params=params, headers=headers)
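# Illustrative follow-up (not in the original snippet): fail fast on HTTP
# errors, then decode the JSON body; the 'monitors' key is an assumption
# about the v3 payload shape.
response.raise_for_status()
monitors = response.json()
print(len(monitors.get('monitors', [])))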
|
import time
from datetime import datetime
from requests import Response
import json
import os
import TunnelSSH
import Helpers
import SettingsStorage
import sys
def create_tunnel(tun: dict):
timeout_time: datetime = datetime.fromisoformat(tun["timeout_time"])
if timeout_time <= datetime.utcnow():
Helpers.log_that("Tunnel already not valid")
return
if tun["connection_type"] == Helpers.ConnectionTypeEnum.ssh_tunnel:
TunnelSSH.create_ssh_tunnel(tun)
def destroy_tunnel(tun: dict):
if tun["connection_type"] == Helpers.ConnectionTypeEnum.ssh_tunnel:
TunnelSSH.destroy_ssh_tunnel(tun)
def destroy_expired_tunnels():
for tun in SettingsStorage.datajson["tunnels"]:
timeout_time: datetime = datetime.fromisoformat(tun["timeout_time"])
if timeout_time <= datetime.utcnow():
Helpers.log_that("A tunnel has expired, destroy")
destroy_tunnel(tun)
def act_on_tunnel(tun: dict):
Helpers.log_that(tun)
tunnel_id: int = tun["id"]
conn_type: Helpers.ConnectionTypeEnum = tun["connection_type"]
state: Helpers.ConnectionStateEnum = tun["connection_state"]
port_to_tunnel: int = tun["port_to_tunnel"]
timeout_time: datetime = datetime.fromisoformat(tun["timeout_time"])
temporary_pubkey: str = tun["temporary_pubkey_for_agent_ssh"]
remote_ssh_server: str = tun["remote_ssh_server"]
remote_ssh_fingerprint: str = tun["remote_ssh_fingerprint"]
remote_ssh_username: str = tun["remote_ssh_username"]
reverse_port: int = tun["reverse_port"]
remote_ssh_port: int = tun["remote_ssh_port"]
temporary_tunnel_privkey: str = tun["temporary_tunnel_privkey"]
if state == Helpers.ConnectionStateEnum.connected:
# First check what we should do:
Helpers.log_that("Requested a connection that is already connected, ignoring")
return
elif state == Helpers.ConnectionStateEnum.requested:
Helpers.log_that("Requesting new connection, act upon that!")
create_tunnel(tun)
elif state == Helpers.ConnectionStateEnum.disconnect_requested:
Helpers.log_that("Requesting to destroy the connection id {}".format(tunnel_id))
destroy_tunnel(tun)
def parse_success_resp(resp: Response):
j: dict = resp.json()
keys = j.keys()
if "message" in keys and len(j["message"]) > 0:
Helpers.log_that(j["message"])
if "tunnels_requesting_action" in keys and len(j["tunnels_requesting_action"]) > 0:
Helpers.log_that("There are {} tunnels requesting action:".format(len(j["tunnels_requesting_action"])))
for tun in j["tunnels_requesting_action"]:
act_on_tunnel(tun)
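# Illustrative shape of a success response consumed above (inferred from
# the key checks, not from an API spec):
# {"message": "...", "tunnels_requesting_action": [tun, ...]}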
def main():
# Our small local "db" of currently active tunnels
while True: # Do this all the time
try:
# First, check whether the agent is installed; if not, send the installation data
if not SettingsStorage.is_installed:
resp: Response = Helpers.ReqSession.post(SettingsStorage.server_url + "/agents/agent_install",
json=Helpers.get_install_json())
if resp.status_code == 200:
SettingsStorage.is_installed = True
SettingsStorage.datajson["is_installed"] = True
Helpers.log_that("Successfully Installed!")
else:
msg = ""
if "detail" in resp.json().keys():
msg = resp.json()["detail"]
Helpers.log_that(
"Error when trying to install the agent. Code {}, with message {}".format(str(resp.status_code), msg))
# Next, check whether any tunnels should be disconnected (TBD)
destroy_expired_tunnels()
Helpers.remove_expired_ssh_auth_keys()
resp: Response = Helpers.ReqSession.post(SettingsStorage.server_url + "/agents/query", json=Helpers.get_query_json())
if resp.status_code == 200:
parse_success_resp(resp)
else:
msg = ""
if "detail" in resp.json().keys():
msg = resp.json()["detail"]
Helpers.log_that(
"Error when querying the API. Code {}, with message {}".format(str(resp.status_code), msg))
except ValueError as e:
Helpers.log_that("Could not process some value" + str(e.args))
except Exception as e:
Helpers.log_that("Could not connect to server " + str(e.args))
datafile = open(os.path.join(sys.path[0], "data.json"), "w")
json.dump(SettingsStorage.datajson, datafile)
datafile.close()
time.sleep(SettingsStorage.interval_seconds)
if __name__ == "__main__":
# execute only if run as a script
main()
|
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from cotede.humanqc.humaneval import HumanQC
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin_tempest_plugin.common import constants
from senlin_tempest_plugin.common import utils
from senlin_tempest_plugin.functional import base
class TestScalingPolicy(base.BaseSenlinFunctionalTest):
def setUp(self):
super(TestScalingPolicy, self).setUp()
self.profile_id = utils.create_a_profile(self)
self.addCleanup(utils.delete_a_profile, self, self.profile_id)
self.cluster_id = utils.create_a_cluster(self, self.profile_id,
min_size=0, max_size=5,
desired_capacity=1)
self.addCleanup(utils.delete_a_cluster, self, self.cluster_id)
@decorators.attr(type=['functional'])
@decorators.idempotent_id('6b513a5d-75b6-447a-b95d-e17b84ac9ee8')
def test_scaling_policy(self):
# Create a scaling policy that targets the CLUSTER_SCALE_OUT action
spec = constants.spec_scaling_policy
spec['properties'] = {
'event': 'CLUSTER_SCALE_OUT',
'adjustment': {
'type': 'CHANGE_IN_CAPACITY',
'number': 2,
'min_step': 1,
'best_effort': True
}
}
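# CHANGE_IN_CAPACITY treats `number` as an absolute node count, so this
# policy adds 2 nodes per scale-out by default; best_effort=True lets the
# action scale partially instead of failing once max_size (5) is reached.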
policy_id = utils.create_a_policy(self, spec)
scaleout_policy = utils.get_a_policy(self, policy_id)
self.addCleanup(utils.delete_a_policy, self, scaleout_policy['id'])
# Create a scaling policy that targets the CLUSTER_SCALE_IN action
spec['properties'] = {
'event': 'CLUSTER_SCALE_IN',
'adjustment': {
'type': 'CHANGE_IN_PERCENTAGE',
'number': 50,
'min_step': 2,
'best_effort': False
}
}
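# CHANGE_IN_PERCENTAGE treats `number` as a percentage of current
# capacity, with min_step=2 enforcing at least 2 nodes per scale-in;
# best_effort=False makes the action fail if min_size would be violated.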
policy_id = utils.create_a_policy(self, spec)
scalein_policy = utils.get_a_policy(self, policy_id)
self.addCleanup(utils.delete_a_policy, self, scalein_policy['id'])
# Attach scale in/out policies to cluster
for policy in [scaleout_policy, scalein_policy]:
utils.cluster_attach_policy(self, self.cluster_id, policy['id'])
self.addCleanup(utils.cluster_detach_policy, self,
self.cluster_id, policy['id'])
# Scale out cluster without count specified
utils.cluster_scale_out(self, self.cluster_id)
# Verify scale out result
cluster = utils.get_a_cluster(self, self.cluster_id)
self.assertEqual('ACTIVE', cluster['status'])
self.assertEqual(3, cluster['desired_capacity'])
self.assertEqual(3, len(cluster['nodes']))
# Scale out cluster with count set to 1
utils.cluster_scale_out(self, self.cluster_id, count=1)
# Verify scale out result
cluster = utils.get_a_cluster(self, self.cluster_id)
self.assertEqual('ACTIVE', cluster['status'])
self.assertEqual(4, cluster['desired_capacity'])
self.assertEqual(4, len(cluster['nodes']))
# Keep scaling out cluster with count set to 2 to
# verify best_effort parameter
utils.cluster_scale_out(self, self.cluster_id, count=2)
# Verify scale out result
cluster = utils.get_a_cluster(self, self.cluster_id)
self.assertEqual('ACTIVE', cluster['status'])
self.assertEqual(5, cluster['desired_capacity'])
self.assertEqual(5, len(cluster['nodes']))
# Scale in cluster without count specified
utils.cluster_scale_in(self, self.cluster_id)
# Verify scale in result
cluster = utils.get_a_cluster(self, self.cluster_id)
self.assertEqual('ACTIVE', cluster['status'])
self.assertEqual(3, cluster['desired_capacity'])
self.assertEqual(3, len(cluster['nodes']))
# Scale in cluster without count specified to
# verify min_step parameter
utils.cluster_scale_in(self, self.cluster_id)
# Verify scale in result
cluster = utils.get_a_cluster(self, self.cluster_id)
self.assertEqual('ACTIVE', cluster['status'])
self.assertEqual(1, cluster['desired_capacity'])
self.assertEqual(1, len(cluster['nodes']))
# Keep scaling in cluster with count set to 2 to
# verify best_effort parameter
res = utils.cluster_scale_in(self, self.cluster_id, count=2,
expected_status='FAILED')
# Verify action result and action failure reason
cluster = utils.get_a_cluster(self, self.cluster_id)
self.assertEqual('ACTIVE', cluster['status'])
self.assertEqual(1, cluster['desired_capacity'])
self.assertEqual(1, len(cluster['nodes']))
reason = ("Policy check failure: Failed policy '%s': The target "
"capacity (-1) is less than the cluster's "
"min_size (0).") % scalein_policy['name']
self.assertEqual(reason, res)
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for exploration domain objects and methods defined on them."""
import copy
import os
import re
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import html_validation_service
from core.domain import param_domain
from core.domain import state_domain
from core.platform import models
from core.tests import test_utils
import feconf
import utils
(exp_models,) = models.Registry.import_models([models.NAMES.exploration])
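# models.Registry.import_models returns the requested storage-model
# modules; only the exploration models are needed in these tests.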
def mock_get_filename_with_dimensions(filename, unused_exp_id):
return html_validation_service.regenerate_image_filename_using_dimensions(
filename, 490, 120)
class ExplorationChangeTests(test_utils.GenericTestBase):
def test_exp_change_object_with_missing_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, 'Missing cmd key in change dict'):
exp_domain.ExplorationChange({'invalid': 'data'})
def test_exp_change_object_with_invalid_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, 'Command invalid is not allowed'):
exp_domain.ExplorationChange({'cmd': 'invalid'})
def test_exp_change_object_with_missing_attribute_in_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'The following required attributes are missing: '
'new_value')):
exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'property_name': 'content',
'old_value': 'old_value'
})
def test_exp_change_object_with_extra_attribute_in_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'The following extra attributes are present: invalid')):
exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'old_state_name',
'new_state_name': 'new_state_name',
'invalid': 'invalid'
})
def test_exp_change_object_with_invalid_exploration_property(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'Value for property_name in cmd edit_exploration_property: '
'invalid is not allowed')):
exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'invalid',
'old_value': 'old_value',
'new_value': 'new_value',
})
def test_exp_change_object_with_invalid_state_property(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'Value for property_name in cmd edit_state_property: '
'invalid is not allowed')):
exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': 'state_name',
'property_name': 'invalid',
'old_value': 'old_value',
'new_value': 'new_value',
})
def test_exp_change_object_with_create_new(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'create_new',
'category': 'category',
'title': 'title'
})
self.assertEqual(exp_change_object.cmd, 'create_new')
self.assertEqual(exp_change_object.category, 'category')
self.assertEqual(exp_change_object.title, 'title')
def test_exp_change_object_with_add_state(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'state_name',
})
self.assertEqual(exp_change_object.cmd, 'add_state')
self.assertEqual(exp_change_object.state_name, 'state_name')
def test_exp_change_object_with_rename_state(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'old_state_name',
'new_state_name': 'new_state_name'
})
self.assertEqual(exp_change_object.cmd, 'rename_state')
self.assertEqual(exp_change_object.old_state_name, 'old_state_name')
self.assertEqual(exp_change_object.new_state_name, 'new_state_name')
def test_exp_change_object_with_delete_state(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'state_name',
})
self.assertEqual(exp_change_object.cmd, 'delete_state')
self.assertEqual(exp_change_object.state_name, 'state_name')
def test_exp_change_object_with_edit_state_property(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': 'state_name',
'property_name': 'content',
'new_value': 'new_value',
'old_value': 'old_value'
})
self.assertEqual(exp_change_object.cmd, 'edit_state_property')
self.assertEqual(exp_change_object.state_name, 'state_name')
self.assertEqual(exp_change_object.property_name, 'content')
self.assertEqual(exp_change_object.new_value, 'new_value')
self.assertEqual(exp_change_object.old_value, 'old_value')
def test_exp_change_object_with_edit_exploration_property(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'new_value',
'old_value': 'old_value'
})
self.assertEqual(exp_change_object.cmd, 'edit_exploration_property')
self.assertEqual(exp_change_object.property_name, 'title')
self.assertEqual(exp_change_object.new_value, 'new_value')
self.assertEqual(exp_change_object.old_value, 'old_value')
def test_exp_change_object_with_migrate_states_schema_to_latest_version(
self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'migrate_states_schema_to_latest_version',
'from_version': 'from_version',
'to_version': 'to_version',
})
self.assertEqual(
exp_change_object.cmd, 'migrate_states_schema_to_latest_version')
self.assertEqual(exp_change_object.from_version, 'from_version')
self.assertEqual(exp_change_object.to_version, 'to_version')
def test_exp_change_object_with_revert_commit(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': exp_models.ExplorationModel.CMD_REVERT_COMMIT,
'version_number': 'version_number'
})
self.assertEqual(
exp_change_object.cmd,
exp_models.ExplorationModel.CMD_REVERT_COMMIT)
self.assertEqual(exp_change_object.version_number, 'version_number')
def test_to_dict(self):
exp_change_dict = {
'cmd': 'create_new',
'title': 'title',
'category': 'category'
}
exp_change_object = exp_domain.ExplorationChange(exp_change_dict)
self.assertEqual(exp_change_object.to_dict(), exp_change_dict)
class ExplorationVersionsDiffDomainUnitTests(test_utils.GenericTestBase):
"""Test the exploration versions difference domain object."""
def setUp(self):
super(ExplorationVersionsDiffDomainUnitTests, self).setUp()
self.exp_id = 'exp_id1'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, self.exp_id,
assets_list)
self.exploration = exp_fetchers.get_exploration_by_id(self.exp_id)
def test_correct_creation_of_version_diffs(self):
# Rename a state.
self.exploration.rename_state('Home', 'Renamed state')
change_list = [exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'Home',
'new_state_name': 'Renamed state'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, [])
self.assertEqual(exp_versions_diff.deleted_state_names, [])
self.assertEqual(
exp_versions_diff.old_to_new_state_names, {
'Home': 'Renamed state'
})
self.exploration.version += 1
# Add a state.
self.exploration.add_states(['New state'])
self.exploration.states['New state'] = copy.deepcopy(
self.exploration.states['Renamed state'])
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state',
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, ['New state'])
self.assertEqual(exp_versions_diff.deleted_state_names, [])
self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
self.exploration.version += 1
# Delete state.
self.exploration.delete_state('New state')
change_list = [exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'New state'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, [])
self.assertEqual(exp_versions_diff.deleted_state_names, ['New state'])
self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
self.exploration.version += 1
# Test addition and multiple renames.
self.exploration.add_states(['New state'])
self.exploration.states['New state'] = copy.deepcopy(
self.exploration.states['Renamed state'])
self.exploration.rename_state('New state', 'New state2')
self.exploration.rename_state('New state2', 'New state3')
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state',
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state',
'new_state_name': 'New state2'
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state2',
'new_state_name': 'New state3'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, ['New state3'])
self.assertEqual(exp_versions_diff.deleted_state_names, [])
self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
self.exploration.version += 1
# Test addition, rename and deletion.
self.exploration.add_states(['New state 2'])
self.exploration.rename_state('New state 2', 'Renamed state 2')
self.exploration.delete_state('Renamed state 2')
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state 2'
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state 2',
'new_state_name': 'Renamed state 2'
}), exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'Renamed state 2'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, [])
self.assertEqual(exp_versions_diff.deleted_state_names, [])
self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
self.exploration.version += 1
# Test multiple renames and deletion.
self.exploration.rename_state('New state3', 'Renamed state 3')
self.exploration.rename_state('Renamed state 3', 'Renamed state 4')
self.exploration.delete_state('Renamed state 4')
change_list = [exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state3',
'new_state_name': 'Renamed state 3'
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'Renamed state 3',
'new_state_name': 'Renamed state 4'
}), exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'Renamed state 4'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, [])
self.assertEqual(
exp_versions_diff.deleted_state_names, ['New state3'])
self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
self.exploration.version += 1
def test_cannot_create_exploration_change_with_invalid_change_dict(self):
with self.assertRaisesRegexp(
Exception, 'Missing cmd key in change dict'):
exp_domain.ExplorationChange({
'invalid_cmd': 'invalid'
})
def test_cannot_create_exploration_change_with_invalid_cmd(self):
with self.assertRaisesRegexp(
Exception, 'Command invalid_cmd is not allowed'):
exp_domain.ExplorationChange({
'cmd': 'invalid_cmd'
})
def test_cannot_create_exploration_change_with_invalid_state_property(self):
exp_change = exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
'state_name': '',
'new_value': ''
})
self.assertTrue(isinstance(exp_change, exp_domain.ExplorationChange))
with self.assertRaisesRegexp(
Exception,
'Value for property_name in cmd edit_state_property: '
'invalid_property is not allowed'):
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'invalid_property',
'state_name': '',
'new_value': ''
})
def test_cannot_create_exploration_change_with_invalid_exploration_property(
self):
exp_change = exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'title',
'new_value': ''
})
self.assertTrue(isinstance(exp_change, exp_domain.ExplorationChange))
with self.assertRaisesRegexp(
Exception,
'Value for property_name in cmd edit_exploration_property: '
'invalid_property is not allowed'):
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'invalid_property',
'new_value': ''
})
def test_revert_exploration_commit(self):
exp_change = exp_domain.ExplorationChange({
'cmd': exp_models.ExplorationModel.CMD_REVERT_COMMIT,
'version_number': 1
})
self.assertEqual(exp_change.version_number, 1)
exp_change = exp_domain.ExplorationChange({
'cmd': exp_models.ExplorationModel.CMD_REVERT_COMMIT,
'version_number': 2
})
self.assertEqual(exp_change.version_number, 2)
class ExpVersionReferenceTests(test_utils.GenericTestBase):
def test_create_exp_version_reference_object(self):
exp_version_reference = exp_domain.ExpVersionReference('exp_id', 1)
self.assertEqual(
exp_version_reference.to_dict(), {
'exp_id': 'exp_id',
'version': 1
})
def test_validate_exp_version(self):
with self.assertRaisesRegexp(
Exception,
'Expected version to be an int, received invalid_version'):
exp_domain.ExpVersionReference('exp_id', 'invalid_version')
def test_validate_exp_id(self):
with self.assertRaisesRegexp(
Exception, 'Expected exp_id to be a str, received 0'):
exp_domain.ExpVersionReference(0, 1)
class ExplorationDomainUnitTests(test_utils.GenericTestBase):
"""Test the exploration domain object."""
# TODO(bhenning): The validation tests below should be split into separate
# unit tests. Also, all validation errors should be covered in the tests.
def test_validation(self):
"""Test validation of explorations."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.init_state_name = ''
exploration.states = {}
exploration.title = 'Hello #'
self._assert_validation_error(exploration, 'Invalid character #')
exploration.title = 'Title'
exploration.category = 'Category'
# Note: If '/' ever becomes a valid state name, ensure that the rule
# editor frontend template is fixed -- it currently uses '/' as a
# sentinel for an invalid state name.
bad_state = state_domain.State.create_default_state('/')
exploration.states = {'/': bad_state}
self._assert_validation_error(
exploration, 'Invalid character / in a state name')
new_state = state_domain.State.create_default_state('ABC')
new_state.update_interaction_id('TextInput')
# The 'states' property must be a non-empty dict of states.
exploration.states = {}
self._assert_validation_error(
exploration, 'exploration has no states')
exploration.states = {'A string #': new_state}
self._assert_validation_error(
exploration, 'Invalid character # in a state name')
exploration.states = {'A string _': new_state}
self._assert_validation_error(
exploration, 'Invalid character _ in a state name')
exploration.states = {'ABC': new_state}
self._assert_validation_error(
exploration, 'has no initial state name')
exploration.init_state_name = 'initname'
self._assert_validation_error(
exploration,
r'There is no state in \[\'ABC\'\] corresponding to '
'the exploration\'s initial state name initname.')
# Verify that a default outcome pointing to a non-existent state is invalid.
exploration.states = {exploration.init_state_name: new_state}
self._assert_validation_error(
exploration, 'destination ABC is not a valid')
# Restore a valid exploration.
init_state = exploration.states[exploration.init_state_name]
default_outcome_dict = init_state.interaction.default_outcome.to_dict()
default_outcome_dict['dest'] = exploration.init_state_name
init_state.update_interaction_default_outcome(default_outcome_dict)
exploration.validate()
# Ensure an invalid destination can also be detected for answer groups.
# Note: The state must keep its default_outcome, otherwise it will
# trigger a validation error for non-terminal states needing to have a
# default outcome. To validate the outcome of the answer group, this
# default outcome must point to a valid state.
init_state = exploration.states[exploration.init_state_name]
default_outcome = init_state.interaction.default_outcome
default_outcome.dest = exploration.init_state_name
old_answer_groups = copy.deepcopy(init_state.interaction.answer_groups)
old_answer_groups.append({
'outcome': {
'dest': exploration.init_state_name,
'feedback': {
'content_id': 'feedback_1',
'html': '<p>Feedback</p>'
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'rule_specs': [{
'inputs': {
'x': 'Test'
},
'rule_type': 'Contains'
}],
'training_data': [],
'tagged_skill_misconception_id': None
})
init_state.update_interaction_answer_groups(old_answer_groups)
exploration.validate()
interaction = init_state.interaction
answer_groups = interaction.answer_groups
answer_group = answer_groups[0]
answer_group.outcome.dest = 'DEF'
self._assert_validation_error(
exploration, 'destination DEF is not a valid')
# Restore a valid exploration.
exploration.states[exploration.init_state_name].update_interaction_id(
'TextInput')
answer_group.outcome.dest = exploration.init_state_name
exploration.validate()
# Validate RuleSpec.
rule_spec = answer_group.rule_specs[0]
rule_spec.inputs = {}
self._assert_validation_error(
exploration, 'RuleSpec \'Contains\' is missing inputs')
rule_spec.inputs = 'Inputs string'
self._assert_validation_error(
exploration, 'Expected inputs to be a dict')
rule_spec.inputs = {'x': 'Test'}
rule_spec.rule_type = 'FakeRuleType'
self._assert_validation_error(exploration, 'Unrecognized rule type')
rule_spec.inputs = {'x': 15}
rule_spec.rule_type = 'Contains'
with self.assertRaisesRegexp(
Exception, 'Expected unicode string, received 15'
):
exploration.validate()
rule_spec.inputs = {'x': '{{ExampleParam}}'}
self._assert_validation_error(
exploration,
'RuleSpec \'Contains\' has an input with name \'x\' which refers '
'to an unknown parameter within the exploration: ExampleParam')
# Restore a valid exploration.
exploration.param_specs['ExampleParam'] = param_domain.ParamSpec(
'UnicodeString')
exploration.validate()
# Validate Outcome.
outcome = answer_group.outcome
destination = exploration.init_state_name
outcome.dest = None
self._assert_validation_error(
exploration, 'Every outcome should have a destination.')
# Try setting the outcome destination to something other than a string.
outcome.dest = 15
self._assert_validation_error(
exploration, 'Expected outcome dest to be a string')
outcome.dest = destination
outcome.feedback = state_domain.SubtitledHtml('feedback_1', '')
exploration.validate()
outcome.labelled_as_correct = 'hello'
self._assert_validation_error(
exploration, 'The "labelled_as_correct" field should be a boolean')
# Test that labelled_as_correct must be False for self-loops, and that
# this causes a strict validation failure but not a normal validation
# failure.
outcome.labelled_as_correct = True
with self.assertRaisesRegexp(
Exception, 'is labelled correct but is a self-loop.'
):
exploration.validate(strict=True)
exploration.validate()
outcome.labelled_as_correct = False
exploration.validate()
outcome.param_changes = 'Changes'
self._assert_validation_error(
exploration, 'Expected outcome param_changes to be a list')
outcome.param_changes = [param_domain.ParamChange(
0, 'generator_id', {})]
self._assert_validation_error(
exploration,
'Expected param_change name to be a string, received 0')
outcome.param_changes = []
exploration.validate()
outcome.refresher_exploration_id = 12345
self._assert_validation_error(
exploration,
'Expected outcome refresher_exploration_id to be a string')
outcome.refresher_exploration_id = None
exploration.validate()
outcome.refresher_exploration_id = 'valid_string'
exploration.validate()
outcome.missing_prerequisite_skill_id = 12345
self._assert_validation_error(
exploration,
'Expected outcome missing_prerequisite_skill_id to be a string')
outcome.missing_prerequisite_skill_id = None
exploration.validate()
outcome.missing_prerequisite_skill_id = 'valid_string'
exploration.validate()
# Test that refresher_exploration_id must be None for non-self-loops.
new_state_name = 'New state'
exploration.add_states([new_state_name])
outcome.dest = new_state_name
outcome.refresher_exploration_id = 'another_string'
self._assert_validation_error(
exploration,
'has a refresher exploration ID, but is not a self-loop')
outcome.refresher_exploration_id = None
exploration.validate()
exploration.delete_state(new_state_name)
# Validate InteractionInstance.
interaction.id = 15
self._assert_validation_error(
exploration, 'Expected interaction id to be a string')
interaction.id = 'SomeInteractionTypeThatDoesNotExist'
self._assert_validation_error(exploration, 'Invalid interaction id')
interaction.id = 'TextInput'
exploration.validate()
interaction.customization_args = []
self._assert_validation_error(
exploration, 'Expected customization args to be a dict')
interaction.customization_args = {15: ''}
self._assert_validation_error(
exploration, 'Invalid customization arg name')
interaction.customization_args = {'placeholder': ''}
exploration.validate()
interaction.answer_groups = {}
self._assert_validation_error(
exploration, 'Expected answer groups to be a list')
interaction.answer_groups = answer_groups
interaction.id = 'EndExploration'
self._assert_validation_error(
exploration,
'Terminal interactions must not have a default outcome.')
interaction.id = 'TextInput'
init_state.update_interaction_default_outcome(None)
self._assert_validation_error(
exploration,
'Non-terminal interactions must have a default outcome.')
interaction.id = 'EndExploration'
self._assert_validation_error(
exploration,
'Terminal interactions must not have any answer groups.')
# A terminal interaction without a default outcome or answer group is
# valid. This resets the exploration back to a valid state.
init_state.update_interaction_answer_groups([])
exploration.validate()
# Restore a valid exploration.
interaction.id = 'TextInput'
answer_groups_list = [
answer_group.to_dict() for answer_group in answer_groups]
init_state.update_interaction_answer_groups(answer_groups_list)
init_state.update_interaction_default_outcome(default_outcome.to_dict())
exploration.validate()
init_state.update_interaction_solution({
'answer_is_exclusive': True,
'correct_answer': 'hello_world!',
'explanation': {
'content_id': 'solution',
'html': 'hello_world is a string'
}
})
self._assert_validation_error(
exploration,
re.escape('Hint(s) must be specified if solution is specified'))
init_state.update_interaction_solution(None)
interaction.hints = {}
self._assert_validation_error(
exploration, 'Expected hints to be a list')
interaction.hints = []
# Validate AnswerGroup.
answer_groups_dict = {
'outcome': {
'dest': exploration.init_state_name,
'feedback': {
'content_id': 'feedback_1',
'html': 'Feedback'
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'rule_specs': [{
'inputs': {
'x': 'Test'
},
'rule_type': 'Contains'
}],
'training_data': [],
'tagged_skill_misconception_id': 1
}
init_state.update_interaction_answer_groups([answer_groups_dict])
self._assert_validation_error(
exploration,
'Expected tagged skill misconception id to be a str, received 1')
answer_groups_dict = {
'outcome': {
'dest': exploration.init_state_name,
'feedback': {
'content_id': 'feedback_1',
'html': 'Feedback'
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'rule_specs': [{
'inputs': {
'x': 'Test'
},
'rule_type': 'Contains'
}],
'training_data': [],
'tagged_skill_misconception_id':
'invalid_tagged_skill_misconception_id'
}
init_state.update_interaction_answer_groups([answer_groups_dict])
self._assert_validation_error(
exploration,
'Expected the format of tagged skill misconception id '
'to be <skill_id>-<misconception_id>, received '
'invalid_tagged_skill_misconception_id')
init_state.interaction.answer_groups[0].rule_specs = {}
self._assert_validation_error(
exploration, 'Expected answer group rules to be a list')
first_answer_group = init_state.interaction.answer_groups[0]
first_answer_group.tagged_skill_misconception_id = None
first_answer_group.rule_specs = []
self._assert_validation_error(
exploration,
'There must be at least one rule or training data for each'
' answer group.')
exploration.states = {
exploration.init_state_name: (
state_domain.State.create_default_state(
exploration.init_state_name))
}
exploration.states[exploration.init_state_name].update_interaction_id(
'TextInput')
exploration.validate()
exploration.language_code = 'fake_code'
self._assert_validation_error(exploration, 'Invalid language_code')
exploration.language_code = 'English'
self._assert_validation_error(exploration, 'Invalid language_code')
exploration.language_code = 'en'
exploration.validate()
exploration.param_specs = 'A string'
self._assert_validation_error(exploration, 'param_specs to be a dict')
exploration.param_specs = {
'@': param_domain.ParamSpec.from_dict({
'obj_type': 'UnicodeString'
})
}
self._assert_validation_error(
exploration, 'Only parameter names with characters')
exploration.param_specs = {
'notAParamSpec': param_domain.ParamSpec.from_dict(
{'obj_type': 'UnicodeString'})
}
exploration.validate()
def test_tag_validation(self):
"""Test validation of exploration tags."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.objective = 'Objective'
init_state = exploration.states[exploration.init_state_name]
init_state.update_interaction_id('EndExploration')
init_state.update_interaction_default_outcome(None)
exploration.validate()
exploration.tags = 'this should be a list'
self._assert_validation_error(
exploration, 'Expected \'tags\' to be a list')
exploration.tags = [123]
self._assert_validation_error(exploration, 'to be a string')
exploration.tags = ['abc', 123]
self._assert_validation_error(exploration, 'to be a string')
exploration.tags = ['']
self._assert_validation_error(exploration, 'Tags should be non-empty')
exploration.tags = ['123']
self._assert_validation_error(
exploration, 'should only contain lowercase letters and spaces')
exploration.tags = ['ABC']
self._assert_validation_error(
exploration, 'should only contain lowercase letters and spaces')
exploration.tags = [' a b']
self._assert_validation_error(
exploration, 'Tags should not start or end with whitespace')
exploration.tags = ['a b ']
self._assert_validation_error(
exploration, 'Tags should not start or end with whitespace')
exploration.tags = ['a b']
self._assert_validation_error(
exploration, 'Adjacent whitespace in tags should be collapsed')
exploration.tags = ['abc', 'abc']
self._assert_validation_error(
exploration, 'Some tags duplicate each other')
exploration.tags = ['computer science', 'analysis', 'a b c']
exploration.validate()
def test_title_category_and_objective_validation(self):
"""Test that titles, categories and objectives are validated only in
'strict' mode.
"""
self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration = exp_fetchers.get_exploration_by_id('exp_id')
exploration.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'title must be specified'
):
exploration.validate(strict=True)
exploration.title = 'A title'
with self.assertRaisesRegexp(
utils.ValidationError, 'category must be specified'
):
exploration.validate(strict=True)
exploration.category = 'A category'
with self.assertRaisesRegexp(
utils.ValidationError, 'objective must be specified'
):
exploration.validate(strict=True)
exploration.objective = 'An objective'
exploration.validate(strict=True)
def test_get_trainable_states_dict(self):
"""Test the get_trainable_states_dict() method."""
exp_id = 'exp_id1'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id,
assets_list)
exploration_model = exp_models.ExplorationModel.get(
exp_id, strict=False)
old_states = exp_fetchers.get_exploration_from_model(
exploration_model).states
exploration = exp_fetchers.get_exploration_by_id(exp_id)
# Rename a state so it lands in the unchanged answer groups.
exploration.rename_state('Home', 'Renamed state')
change_list = [exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'Home',
'new_state_name': 'Renamed state'
})]
expected_dict = {
'state_names_with_changed_answer_groups': [],
'state_names_with_unchanged_answer_groups': ['Renamed state']
}
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, exp_versions_diff)
self.assertEqual(actual_dict, expected_dict)
# Modify answer groups to trigger change in answer groups.
state = exploration.states['Renamed state']
exploration.states['Renamed state'].interaction.answer_groups.insert(
3, state.interaction.answer_groups[3])
answer_groups = []
for answer_group in state.interaction.answer_groups:
answer_groups.append(answer_group.to_dict())
change_list = [exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': 'Renamed state',
'property_name': 'answer_groups',
'new_value': answer_groups
})]
expected_dict = {
'state_names_with_changed_answer_groups': ['Renamed state'],
'state_names_with_unchanged_answer_groups': []
}
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, exp_versions_diff)
self.assertEqual(actual_dict, expected_dict)
# Add new state to trigger change in answer groups.
exploration.add_states(['New state'])
exploration.states['New state'] = copy.deepcopy(
exploration.states['Renamed state'])
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state',
})]
expected_dict = {
'state_names_with_changed_answer_groups': [
'New state', 'Renamed state'],
'state_names_with_unchanged_answer_groups': []
}
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, exp_versions_diff)
self.assertEqual(actual_dict, expected_dict)
# Delete state.
exploration.delete_state('New state')
change_list = [exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'New state'
})]
expected_dict = {
'state_names_with_changed_answer_groups': ['Renamed state'],
'state_names_with_unchanged_answer_groups': []
}
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, exp_versions_diff)
self.assertEqual(actual_dict, expected_dict)
# Test addition and multiple renames.
exploration.add_states(['New state'])
exploration.states['New state'] = copy.deepcopy(
exploration.states['Renamed state'])
exploration.rename_state('New state', 'New state2')
exploration.rename_state('New state2', 'New state3')
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state',
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state',
'new_state_name': 'New state2'
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state2',
'new_state_name': 'New state3'
})]
expected_dict = {
'state_names_with_changed_answer_groups': [
'Renamed state', 'New state3'],
'state_names_with_unchanged_answer_groups': []
}
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, exp_versions_diff)
self.assertEqual(actual_dict, expected_dict)
def test_is_demo_property(self):
"""Test the is_demo property."""
demo = exp_domain.Exploration.create_default_exploration('0')
self.assertEqual(demo.is_demo, True)
notdemo1 = exp_domain.Exploration.create_default_exploration('a')
self.assertEqual(notdemo1.is_demo, False)
notdemo2 = exp_domain.Exploration.create_default_exploration('abcd')
self.assertEqual(notdemo2.is_demo, False)
def test_has_state_name(self):
"""Test for has_state_name."""
demo = exp_domain.Exploration.create_default_exploration('0')
state_names = demo.states.keys()
self.assertEqual(state_names, ['Introduction'])
self.assertEqual(demo.has_state_name('Introduction'), True)
self.assertEqual(demo.has_state_name('Fake state name'), False)
def test_exploration_export_import(self):
"""Test that to_dict and from_dict preserve all data within an
exploration.
"""
demo = exp_domain.Exploration.create_default_exploration('0')
demo_dict = demo.to_dict()
exp_from_dict = exp_domain.Exploration.from_dict(demo_dict)
self.assertEqual(exp_from_dict.to_dict(), demo_dict)
def test_interaction_with_none_id_is_not_terminal(self):
"""Test that an interaction with an id of None leads to is_terminal
being false.
"""
# Default exploration has a default interaction with an ID of None.
demo = exp_domain.Exploration.create_default_exploration('0')
init_state = demo.states[feconf.DEFAULT_INIT_STATE_NAME]
self.assertFalse(init_state.interaction.is_terminal)
def test_cannot_create_demo_exp_with_invalid_param_changes(self):
demo_exp = exp_domain.Exploration.create_default_exploration('0')
demo_dict = demo_exp.to_dict()
new_state = state_domain.State.create_default_state('new_state_name')
new_state.param_changes = [param_domain.ParamChange.from_dict({
'customization_args': {
'list_of_values': ['1', '2'], 'parse_with_jinja': False
},
'name': 'myParam',
'generator_id': 'RandomSelector'
})]
demo_dict['states']['new_state_name'] = new_state.to_dict()
demo_dict['param_specs'] = {
'ParamSpec': {'obj_type': 'UnicodeString'}
}
with self.assertRaisesRegexp(
Exception,
'Parameter myParam was used in a state but not '
'declared in the exploration param_specs.'):
exp_domain.Exploration.from_dict(demo_dict)
def test_validate_exploration_category(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.category = 1
with self.assertRaisesRegexp(
Exception, 'Expected category to be a string, received 1'):
exploration.validate()
def test_validate_exploration_objective(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.objective = 1
with self.assertRaisesRegexp(
Exception, 'Expected objective to be a string, received 1'):
exploration.validate()
def test_validate_exploration_blurb(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.blurb = 1
with self.assertRaisesRegexp(
Exception, 'Expected blurb to be a string, received 1'):
exploration.validate()
def test_validate_exploration_language_code(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.language_code = 1
with self.assertRaisesRegexp(
Exception, 'Expected language_code to be a string, received 1'):
exploration.validate()
def test_validate_exploration_author_notes(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.author_notes = 1
with self.assertRaisesRegexp(
Exception, 'Expected author_notes to be a string, received 1'):
exploration.validate()
def test_validate_exploration_states(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.states = 1
with self.assertRaisesRegexp(
Exception, 'Expected states to be a dict, received 1'):
exploration.validate()
def test_validate_exploration_outcome_dest(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.init_state.interaction.default_outcome.dest = None
with self.assertRaisesRegexp(
Exception, 'Every outcome should have a destination.'):
exploration.validate()
def test_validate_exploration_outcome_dest_type(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.init_state.interaction.default_outcome.dest = 1
with self.assertRaisesRegexp(
Exception, 'Expected outcome dest to be a string, received 1'):
exploration.validate()
def test_validate_exploration_states_schema_version(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.states_schema_version = None
with self.assertRaisesRegexp(
Exception, 'This exploration has no states schema version.'):
exploration.validate()
def test_validate_exploration_auto_tts_enabled(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.auto_tts_enabled = 1
with self.assertRaisesRegexp(
Exception, 'Expected auto_tts_enabled to be a bool, received 1'):
exploration.validate()
def test_validate_exploration_correctness_feedback_enabled(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.correctness_feedback_enabled = 1
with self.assertRaisesRegexp(
Exception,
'Expected correctness_feedback_enabled to be a bool, received 1'):
exploration.validate()
def test_validate_exploration_param_specs(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.param_specs = {
1: param_domain.ParamSpec.from_dict(
{'obj_type': 'UnicodeString'})
}
with self.assertRaisesRegexp(
Exception, 'Expected parameter name to be a string, received 1'):
exploration.validate()
def test_validate_exploration_param_changes_type(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.param_changes = 1
with self.assertRaisesRegexp(
Exception, 'Expected param_changes to be a list, received 1'):
exploration.validate()
def test_validate_exploration_param_name(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.param_changes = [param_domain.ParamChange.from_dict({
'customization_args': {
'list_of_values': ['1', '2'], 'parse_with_jinja': False
},
'name': 'invalid',
'generator_id': 'RandomSelector'
})]
with self.assertRaisesRegexp(
Exception,
'No parameter named \'invalid\' exists in this '
'exploration'):
exploration.validate()
def test_validate_exploration_reserved_param_name(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.param_changes = [param_domain.ParamChange.from_dict({
'customization_args': {
'list_of_values': ['1', '2'], 'parse_with_jinja': False
},
'name': 'all',
'generator_id': 'RandomSelector'
})]
with self.assertRaisesRegexp(
Exception,
'The exploration-level parameter with name \'all\' is '
'reserved. Please choose a different name.'):
exploration.validate()
def test_validate_exploration_is_non_self_loop(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.add_states(['DEF'])
default_outcome_dict = {
'dest': 'DEF',
'feedback': {
'content_id': 'default_outcome',
'html': '<p>Default outcome for state1</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': 'refresher_exploration_id',
'missing_prerequisite_skill_id': None
}
exploration.init_state.update_interaction_default_outcome(
default_outcome_dict)
with self.assertRaisesRegexp(
Exception,
'The default outcome for state Introduction has a refresher '
'exploration ID, but is not a self-loop.'):
exploration.validate()
def test_validate_exploration_answer_group_parameter(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='', category='',
objective='', end_state_name='End')
exploration.validate()
param_changes = [{
'customization_args': {
'list_of_values': ['1', '2'], 'parse_with_jinja': False
},
'name': 'ParamChange',
'generator_id': 'RandomSelector'
}]
answer_groups = [{
'outcome': {
'dest': exploration.init_state_name,
'feedback': {
'content_id': 'feedback_1',
'html': 'Feedback'
},
'labelled_as_correct': False,
'param_changes': param_changes,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'rule_specs': [{
'inputs': {
'x': 'Test'
},
'rule_type': 'Contains'
}],
'training_data': [],
'tagged_skill_misconception_id': None
}]
exploration.init_state.update_interaction_answer_groups(answer_groups)
with self.assertRaisesRegexp(
Exception,
'The parameter ParamChange was used in an answer group, '
'but it does not exist in this exploration'):
exploration.validate()
def test_verify_all_states_reachable(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'owner_id')
exploration.validate()
exploration.add_states(['End'])
end_state = exploration.states['End']
end_state.update_interaction_id('EndExploration')
end_state.update_interaction_default_outcome(None)
with self.assertRaisesRegexp(
Exception,
'Please fix the following issues before saving this exploration: '
'1. The following states are not reachable from the initial state: '
'End 2. It is impossible to complete the exploration from the '
'following states: Introduction'):
exploration.validate(strict=True)
def test_update_init_state_name_with_invalid_state(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='title', category='category',
objective='objective', end_state_name='End')
exploration.update_init_state_name('End')
self.assertEqual(exploration.init_state_name, 'End')
with self.assertRaisesRegexp(
Exception,
'Invalid new initial state name: invalid_state;'):
exploration.update_init_state_name('invalid_state')
def test_rename_state_with_invalid_state(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='title', category='category',
objective='objective', end_state_name='End')
self.assertTrue(exploration.states.get('End'))
self.assertFalse(exploration.states.get('new state name'))
exploration.rename_state('End', 'new state name')
self.assertFalse(exploration.states.get('End'))
self.assertTrue(exploration.states.get('new state name'))
with self.assertRaisesRegexp(
Exception, 'State invalid_state does not exist'):
exploration.rename_state('invalid_state', 'new state name')
def test_default_outcome_is_labelled_incorrect_for_self_loop(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'user@example.com', title='title', category='category',
objective='objective', end_state_name='End')
exploration.validate(strict=True)
(exploration.init_state.interaction.default_outcome
.labelled_as_correct) = True
(exploration.init_state.interaction.default_outcome
.dest) = exploration.init_state_name
with self.assertRaisesRegexp(
Exception,
'The default outcome for state Introduction is labelled '
'correct but is a self-loop'):
exploration.validate(strict=True)
class ExplorationSummaryTests(test_utils.GenericTestBase):
def setUp(self):
super(ExplorationSummaryTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
exploration = exp_domain.Exploration.create_default_exploration('eid')
exp_services.save_new_exploration(owner_id, exploration)
self.exp_summary = exp_fetchers.get_exploration_summary_by_id('eid')
def test_validation_passes_with_valid_properties(self):
self.exp_summary.validate()
def test_validation_fails_with_invalid_title(self):
self.exp_summary.title = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected title to be a string, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_category(self):
self.exp_summary.category = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected category to be a string, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_objective(self):
self.exp_summary.objective = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected objective to be a string, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_language_code(self):
self.exp_summary.language_code = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected language_code to be a string, received 0'):
self.exp_summary.validate()

    def test_validation_fails_with_unallowed_language_code(self):
self.exp_summary.language_code = 'invalid'
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid language_code: invalid'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_tags(self):
self.exp_summary.tags = 'tags'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected \'tags\' to be a list, received tags'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_tag_in_tags(self):
self.exp_summary.tags = ['tag', 2]
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each tag in \'tags\' to be a string, received \'2\''):
self.exp_summary.validate()

    def test_validation_fails_with_empty_tag_in_tags(self):
self.exp_summary.tags = ['', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError, 'Tags should be non-empty'):
self.exp_summary.validate()

    def test_validation_fails_with_unallowed_characters_in_tag(self):
self.exp_summary.tags = ['123', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError, (
'Tags should only contain lowercase '
'letters and spaces, received \'123\'')):
self.exp_summary.validate()

    def test_validation_fails_with_whitespace_in_tag_start(self):
self.exp_summary.tags = [' ab', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError,
'Tags should not start or end with whitespace, received \' ab\''):
self.exp_summary.validate()

    def test_validation_fails_with_whitespace_in_tag_end(self):
self.exp_summary.tags = ['ab ', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError,
'Tags should not start or end with whitespace, received \'ab \''):
self.exp_summary.validate()

    def test_validation_fails_with_adjacent_whitespace_in_tag(self):
self.exp_summary.tags = ['a b', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError, (
'Adjacent whitespace in tags should '
'be collapsed, received \'a b\'')):
self.exp_summary.validate()

    def test_validation_fails_with_duplicate_tags(self):
self.exp_summary.tags = ['abc', 'abc', 'ab']
with self.assertRaisesRegexp(
utils.ValidationError, 'Some tags duplicate each other'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_rating_type(self):
self.exp_summary.ratings = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected ratings to be a dict, received 0'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_rating_keys(self):
self.exp_summary.ratings = {'1': 0, '10': 1}
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected ratings to have keys: 1, 2, 3, 4, 5, received 1, 10'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_value_type_for_ratings(self):
self.exp_summary.ratings = {'1': 0, '2': 'one', '3': 0, '4': 0, '5': 0}
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected value to be int, received one'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_value_for_ratings(self):
self.exp_summary.ratings = {'1': 0, '2': -1, '3': 0, '4': 0, '5': 0}
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected value to be non-negative, received -1'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_scaled_average_rating(self):
self.exp_summary.scaled_average_rating = 'one'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected scaled_average_rating to be float, received one'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_status(self):
self.exp_summary.status = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected status to be string, received 0'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_community_owned(self):
self.exp_summary.community_owned = '1'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected community_owned to be bool, received 1'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_contributors_summary(self):
self.exp_summary.contributors_summary = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected contributors_summary to be dict, received 0'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_owner_ids_type(self):
self.exp_summary.owner_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected owner_ids to be list, received 0'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_owner_id_in_owner_ids(self):
self.exp_summary.owner_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in owner_ids to be string, received 2'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_editor_ids_type(self):
self.exp_summary.editor_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected editor_ids to be list, received 0'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_editor_id_in_editor_ids(self):
self.exp_summary.editor_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in editor_ids to be string, received 2'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_voice_artist_ids_type(self):
self.exp_summary.voice_artist_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected voice_artist_ids to be list, received 0'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_voice_artist_id_in_voice_artists_ids(
self):
self.exp_summary.voice_artist_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in voice_artist_ids to be string, received 2'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_viewer_ids_type(self):
self.exp_summary.viewer_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected viewer_ids to be list, received 0'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_viewer_id_in_viewer_ids(self):
self.exp_summary.viewer_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in viewer_ids to be string, received 2'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_contributor_ids_type(self):
self.exp_summary.contributor_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected contributor_ids to be list, received 0'):
self.exp_summary.validate()

    def test_validation_fails_with_invalid_contributor_id_in_contributor_ids(
self):
self.exp_summary.contributor_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in contributor_ids to be string, received 2'):
self.exp_summary.validate()


class YamlCreationUnitTests(test_utils.GenericTestBase):
"""Test creation of explorations from YAML files."""

    EXP_ID = 'An exploration_id'

    def test_yaml_import_and_export(self):
"""Test the from_yaml() and to_yaml() methods."""
exploration = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, title='Title', category='Category')
exploration.add_states(['New state'])
self.assertEqual(len(exploration.states), 2)
exploration.validate()
yaml_content = exploration.to_yaml()
self.assertEqual(yaml_content, self.SAMPLE_YAML_CONTENT)
exploration2 = exp_domain.Exploration.from_yaml('exp2', yaml_content)
self.assertEqual(len(exploration2.states), 2)
yaml_content_2 = exploration2.to_yaml()
self.assertEqual(yaml_content_2, yaml_content)
# Verify SAMPLE_UNTITLED_YAML_CONTENT can be converted to an exploration
# without error.
exp_domain.Exploration.from_untitled_yaml(
'exp4', 'Title', 'Category', self.SAMPLE_UNTITLED_YAML_CONTENT)
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml('exp3', 'No_initial_state_name')
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml(
'exp4', 'Invalid\ninit_state_name:\nMore stuff')
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml(
'exp4', 'State1:\n(\nInvalid yaml')
with self.assertRaisesRegexp(
Exception, 'Expected a YAML version >= 10, received: 9'
):
exp_domain.Exploration.from_yaml(
'exp4', self.SAMPLE_UNTITLED_YAML_CONTENT)
with self.assertRaisesRegexp(
Exception, 'Expected a YAML version <= 9'
):
exp_domain.Exploration.from_untitled_yaml(
'exp4', 'Title', 'Category', self.SAMPLE_YAML_CONTENT)


class SchemaMigrationMethodsUnitTests(test_utils.GenericTestBase):
"""Tests the presence of appropriate schema migration methods in the
Exploration domain object class.
"""

    def test_correct_states_schema_conversion_methods_exist(self):
"""Test that the right states schema conversion methods exist."""
current_states_schema_version = (
feconf.CURRENT_STATE_SCHEMA_VERSION)
for version_num in range(current_states_schema_version):
self.assertTrue(hasattr(
exp_domain.Exploration,
'_convert_states_v%s_dict_to_v%s_dict' % (
version_num, version_num + 1)))
self.assertFalse(hasattr(
exp_domain.Exploration,
'_convert_states_v%s_dict_to_v%s_dict' % (
current_states_schema_version,
current_states_schema_version + 1)))

    def test_correct_exploration_schema_conversion_methods_exist(self):
"""Test that the right exploration schema conversion methods exist."""
current_exp_schema_version = (
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION)
for version_num in range(1, current_exp_schema_version):
self.assertTrue(hasattr(
exp_domain.Exploration,
'_convert_v%s_dict_to_v%s_dict' % (
version_num, version_num + 1)))
self.assertFalse(hasattr(
exp_domain.Exploration,
'_convert_v%s_dict_to_v%s_dict' % (
current_exp_schema_version, current_exp_schema_version + 1)))


class SchemaMigrationUnitTests(test_utils.GenericTestBase):
    """Test migration methods for YAML content."""
YAML_CONTENT_V1 = ("""default_skin: conversation_v1
param_changes: []
param_specs: {}
schema_version: 1
states:
- content:
- type: text
value: ''
name: (untitled state)
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
- content:
- type: text
value: ''
name: New state
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V2 = ("""default_skin: conversation_v1
init_state_name: (untitled state)
param_changes: []
param_specs: {}
schema_version: 2
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V3 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 3
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V4 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 4
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
""")
YAML_CONTENT_V5 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 5
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
tags: []
""")
YAML_CONTENT_V6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
YAML_CONTENT_V7 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 7
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 4
tags: []
""")
YAML_CONTENT_V8 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 8
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 5
tags: []
""")
YAML_CONTENT_V9 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 9
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
language:
value: ''
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: CodeRepl
param_changes: []
states_schema_version: 6
tags: []
""")
YAML_CONTENT_V10 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 10
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 7
tags: []
title: Title
""")
YAML_CONTENT_V11 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 11
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 8
tags: []
title: Title
""")
YAML_CONTENT_V12 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 12
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks:
- outcome:
dest: END
feedback:
- Correct!
id: TextInput
param_changes: []
states_schema_version: 9
tags: []
title: Title
""")
YAML_CONTENT_V13 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 13
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
hints: []
id: EndExploration
solution: {}
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
states_schema_version: 10
tags: []
title: Title
""")
YAML_CONTENT_V14 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 14
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: []
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: []
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
hints: []
id: EndExploration
solution: {}
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: []
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
states_schema_version: 11
tags: []
title: Title
""")
YAML_CONTENT_V15 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 15
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
hints: []
id: EndExploration
solution: {}
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
states_schema_version: 12
tags: []
title: Title
""")
YAML_CONTENT_V16 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 16
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 13
tags: []
title: Title
""")
YAML_CONTENT_V17 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 17
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 13
tags: []
title: Title
""")
YAML_CONTENT_V18 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 18
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
hints:
- hint_text: ''
id: TextInput
solution:
explanation: ''
answer_is_exclusive: False
correct_answer: Answer
param_changes: []
states_schema_version: 13
tags: []
title: Title
""")
YAML_CONTENT_V19 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 19
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 14
tags: []
title: Title
""")
YAML_CONTENT_V20 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 20
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- labelled_as_correct: false
outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 15
tags: []
title: Title
""")
YAML_CONTENT_V21 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 21
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- labelled_as_correct: false
outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
param_changes: []
refresher_exploration_id: null
hints: []
id: FractionInput
solution: null
param_changes: []
states_schema_version: 16
tags: []
title: Title
""")
YAML_CONTENT_V22 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 22
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 17
tags: []
title: Title
""")
YAML_CONTENT_V23 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 23
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 18
tags: []
title: Title
""")
YAML_CONTENT_V24 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 24
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 19
tags: []
title: Title
""")
YAML_CONTENT_V25 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 25
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 20
tags: []
title: Title
""")
YAML_CONTENT_V26 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 26
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: Correct!
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: Congratulations, you have finished!
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 21
tags: []
title: Title
""")
YAML_CONTENT_V27 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 27
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 22
tags: []
title: Title
""")
YAML_CONTENT_V28 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 28
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 23
tags: []
title: Title
""")
YAML_CONTENT_V29 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 29
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
imageAndRegions:
value:
imagePath: s1ImagePath.png
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: ImageClickInput
solution: null
param_changes: []
states_schema_version: 24
tags: []
title: Title
""")
YAML_CONTENT_V30 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 30
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 25
tags: []
title: Title
""")
YAML_CONTENT_V31 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 31
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
new_content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 26
tags: []
title: Title
""")
YAML_CONTENT_V32 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 32
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 27
tags: []
title: Title
""")
YAML_CONTENT_V33 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 33
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 28
tags: []
title: Title
""")
YAML_CONTENT_V34 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 34
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 29
tags: []
title: Title
""")
YAML_CONTENT_V35 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 30
tags: []
title: Title
""")
_LATEST_YAML_CONTENT = YAML_CONTENT_V35
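# Each test below loads a fixture written under an older schema version and
# asserts that migration round-trips to the latest YAML above. A few versions
# (v9, v12, v18, v21, v29) compare against custom expected YAML because their
# fixtures exercise interaction-specific migrations.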
def test_load_from_v1(self):
"""Test direct loading from a v1 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V1)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v2(self):
"""Test direct loading from a v2 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V2)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v3(self):
"""Test direct loading from a v3 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V3)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v4(self):
"""Test direct loading from a v4 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V4)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v5(self):
"""Test direct loading from a v5 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V5)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v6(self):
"""Test direct loading from a v6 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V6)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_cannot_load_from_v6_with_invalid_handler_name(self):
invalid_yaml_content_v6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: invalid_handler_name
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
with self.assertRaisesRegexp(
Exception,
'Error: Can only convert rules with a name '
'\'submit\' in states v3 to v4 conversion process. '):
exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', invalid_yaml_content_v6)
def test_cannot_load_from_v6_with_invalid_rule(self):
invalid_yaml_content_v6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: invalid_rule
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
with self.assertRaisesRegexp(
Exception,
'Error: Can only convert default and atomic '
'rules in states v3 to v4 conversion process.'):
exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', invalid_yaml_content_v6)
def test_cannot_load_from_v6_with_invalid_subject(self):
invalid_yaml_content_v6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
subject: invalid_subject
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
with self.assertRaisesRegexp(
Exception,
'Error: Can only convert rules with an \'answer\' '
'subject in states v3 to v4 conversion process.'):
exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', invalid_yaml_content_v6)
def test_cannot_load_from_v6_with_invalid_interaction_id(self):
invalid_yaml_content_v6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: invalid_id
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
with self.assertRaisesRegexp(
Exception,
'Trying to migrate exploration containing non-existent '
'interaction ID'):
exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', invalid_yaml_content_v6)
def test_load_from_v7(self):
"""Test direct loading from a v7 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V7)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v8(self):
"""Test direct loading from a v8 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V8)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v9(self):
"""Test direct loading from a v9 yaml file."""
latest_yaml_content = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
language:
value: python
placeholder:
value: ''
postCode:
value: ''
preCode:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: CodeRepl
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 30
tags: []
title: Title
""")
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V9)
self.assertEqual(exploration.to_yaml(), latest_yaml_content)
def test_load_from_v10(self):
"""Test direct loading from a v10 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V10)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v11(self):
"""Test direct loading from a v11 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V11)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v12(self):
"""Test direct loading from a v12 yaml file."""
latest_yaml_content = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints:
- hint_content:
content_id: hint_1
html: <p>Correct!</p>
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
hint_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
hint_1: {}
states_schema_version: 30
tags: []
title: Title
""")
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V12)
self.assertEqual(exploration.to_yaml(), latest_yaml_content)
def test_load_from_v13(self):
"""Test direct loading from a v13 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V13)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v14(self):
"""Test direct loading from a v14 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V14)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v15(self):
"""Test direct loading from a v15 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V15)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v16(self):
"""Test direct loading from a v16 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V16)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v17(self):
"""Test direct loading from a v17 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V17)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v18(self):
"""Test direct loading from a v18 yaml file."""
latest_yaml_content = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints:
- hint_content:
content_id: hint_1
html: ''
id: TextInput
solution:
answer_is_exclusive: false
correct_answer: Answer
explanation:
content_id: solution
html: ''
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
hint_1: {}
solution: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
hint_1: {}
solution: {}
states_schema_version: 30
tags: []
title: Title
""")
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V18)
self.assertEqual(exploration.to_yaml(), latest_yaml_content)
def test_load_from_v19(self):
"""Test direct loading from a v19 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V19)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v20(self):
"""Test direct loading from a v20 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V20)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v21(self):
"""Test direct loading from a v21 yaml file."""
latest_yaml_content = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
allowImproperFraction:
value: true
allowNonzeroIntegerPart:
value: true
customPlaceholder:
value: ''
placeholder:
value: ''
requireSimplestForm:
value: false
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: FractionInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 30
tags: []
title: Title
""")
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V21)
self.assertEqual(exploration.to_yaml(), latest_yaml_content)
def test_load_from_v22(self):
"""Test direct loading from a v22 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V22)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v23(self):
"""Test direct loading from a v23 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V23)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v24(self):
"""Test direct loading from a v24 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V24)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v25(self):
"""Test direct loading from a v25 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V25)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v26(self):
"""Test direct loading from a v26 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V26)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v27(self):
"""Test direct loading from a v27 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V27)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v28(self):
"""Test direct loading from a v28 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V28)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v29(self):
"""Test direct loading from a v29 yaml file."""
latest_yaml_content = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
highlightRegionsOnHover:
value: false
imageAndRegions:
value:
imagePath: s1ImagePath_height_120_width_120.png
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: ImageClickInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 30
tags: []
title: Title
""")
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V29)
self.assertEqual(exploration.to_yaml(), latest_yaml_content)
def test_load_from_v30(self):
"""Test direct loading from a v30 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V30)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v31(self):
"""Test direct loading from a v31 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V31)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v32(self):
"""Test direct loading from a v32 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V32)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v33(self):
"""Test direct loading from a v33 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V33)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_cannot_load_from_yaml_with_no_schema_version(self):
sample_yaml_content = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
tags: []
""")
with self.assertRaisesRegexp(
Exception, 'Invalid YAML file: no schema version specified.'):
exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', sample_yaml_content)
def test_cannot_load_from_yaml_with_invalid_schema_version(self):
sample_yaml_content = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 0
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
tags: []
""")
with self.assertRaisesRegexp(
Exception,
'Sorry, we can only process v1 to v%s exploration YAML files '
'at present.' % exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION):
exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', sample_yaml_content)
class HTMLMigrationUnitTests(test_utils.GenericTestBase):
"""Test HTML migration."""
YAML_CONTENT_V26_TEXTANGULAR = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: category
correctness_feedback_enabled: false
init_state_name: Introduction
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 26
states:
Introduction:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: Introduction
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
param_changes: []
state1:
classifier_model_id: null
content:
content_id: content
html: <blockquote><p>Hello, this is state1</p></blockquote>
content_ids_to_audio_translations:
content: {}
default_outcome: {}
solution: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: state2
feedback:
content_id: default_outcome
html: Default <p>outcome</p> for state1
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution:
answer_is_exclusive: true
correct_answer: Answer1
explanation:
content_id: solution
html: This is <i>solution</i> for state1
param_changes: []
state2:
classifier_model_id: null
content:
content_id: content
html: <p>Hello, </p>this <i>is </i>state2
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
feedback_2: {}
hint_1: {}
hint_2: {}
interaction:
answer_groups:
- outcome:
dest: state1
feedback:
content_id: feedback_1
html: <div>Outcome1 for state2</div>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: 0
rule_type: Equals
- inputs:
x: 1
rule_type: Equals
tagged_misconception_id: null
training_data: []
- outcome:
dest: state3
feedback:
content_id: feedback_2
html: <pre>Outcome2 <br>for state2</pre>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: 0
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
choices:
value:
- <p>This is </p>value1 <br>for MultipleChoice
- This is value2<span> for <br>MultipleChoice</span>
default_outcome:
dest: state2
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints:
- hint_content:
content_id: hint_1
html: <p>Hello, this is<div> html1<b> for </b></div>state2</p>
- hint_content:
content_id: hint_2
html: Here is link 2 <oppia-noninteractive-link
text-with-value="&quot;discussion forum&quot;"
url-with-value="&quot;https://groups.google.com/
forum/?fromgroups#!forum/oppia&quot;">
</oppia-noninteractive-link>
id: MultipleChoiceInput
solution: null
param_changes: []
state3:
classifier_model_id: null
content:
content_id: content
html: <p>Hello, this is state3</p>
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: state1
feedback:
content_id: feedback_1
html: Here is the image1 <i><oppia-noninteractive-image
caption-with-value="&quot;&quot;"
filepath-with-value="&quot;startBlue.png&quot;"
alt-with-value="&quot;&quot;">
</oppia-noninteractive-image></i>Here is the image2
<div><oppia-noninteractive-image caption-with-value="&quot;&quot;"
filepath-with-value="&quot;startBlue.png&quot;"
alt-with-value="&quot;&quot;">
</oppia-noninteractive-image></div>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x:
- This <span>is value1 for </span>ItemSelectionInput
rule_type: Equals
- inputs:
x:
- This is value3 for ItemSelectionInput
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
choices:
value:
- This <span>is value1 for </span>ItemSelection
- This <code>is value2</code> for ItemSelection
- This is value3 for ItemSelection
maxAllowableSelectionCount:
value: 1
minAllowableSelectionCount:
value: 1
default_outcome:
dest: state3
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: ItemSelectionInput
solution: null
param_changes: []
states_schema_version: 21
tags: []
title: title
""")
# pylint: disable=line-too-long
YAML_CONTENT_V35_IMAGE_DIMENSIONS = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: category
correctness_feedback_enabled: false
init_state_name: Introduction
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
Introduction:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: Introduction
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
state1:
classifier_model_id: null
content:
content_id: content
html: <blockquote><p>Hello, this is state1</p></blockquote>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: state2
feedback:
content_id: default_outcome
html: <p>Default </p><p>outcome</p><p> for state1</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution:
answer_is_exclusive: true
correct_answer: Answer1
explanation:
content_id: solution
html: <p>This is <em>solution</em> for state1</p>
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solution: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
solution: {}
state2:
classifier_model_id: null
content:
content_id: content
html: <p>Hello, </p><p>this <em>is </em>state2</p>
interaction:
answer_groups:
- outcome:
dest: state1
feedback:
content_id: feedback_1
html: <p>Outcome1 for state2</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: 0
rule_type: Equals
- inputs:
x: 1
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
- outcome:
dest: state3
feedback:
content_id: feedback_2
html: "<pre>Outcome2 \\nfor state2</pre>"
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: 0
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
choices:
value:
- <p>This is </p><p>value1 <br>for MultipleChoice</p>
- <p>This is value2 for <br>MultipleChoice</p>
default_outcome:
dest: state2
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints:
- hint_content:
content_id: hint_1
html: <p>Hello, this is</p><p> html1<strong> for </strong></p><p>state2</p>
- hint_content:
content_id: hint_2
html: <p>Here is link 2 <oppia-noninteractive-link text-with-value="&quot;discussion
forum&quot;" url-with-value="&quot;https://groups.google.com/
forum/?fromgroups#!forum/oppia&quot;"> </oppia-noninteractive-link></p>
id: MultipleChoiceInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
feedback_2: {}
hint_1: {}
hint_2: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
feedback_2: {}
hint_1: {}
hint_2: {}
state3:
classifier_model_id: null
content:
content_id: content
html: <p>Hello, this is state3</p>
interaction:
answer_groups:
- outcome:
dest: state1
feedback:
content_id: feedback_1
html: <p>Here is the image1 </p><oppia-noninteractive-image alt-with-value="&quot;&quot;"
caption-with-value="&quot;&quot;" filepath-with-value="&quot;startBlue_height_490_width_120.png&quot;">
</oppia-noninteractive-image><p>Here is the image2 </p><oppia-noninteractive-image
alt-with-value="&quot;&quot;" caption-with-value="&quot;&quot;"
filepath-with-value="&quot;startBlue_height_490_width_120.png&quot;">
</oppia-noninteractive-image>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x:
- <p>This is value1 for ItemSelectionInput</p>
rule_type: Equals
- inputs:
x:
- <p>This is value3 for ItemSelectionInput</p>
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
choices:
value:
- <p>This is value1 for ItemSelection</p>
- <p>This is value2 for ItemSelection</p>
- <p>This is value3 for ItemSelection</p>
maxAllowableSelectionCount:
value: 1
minAllowableSelectionCount:
value: 1
default_outcome:
dest: state3
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: ItemSelectionInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
states_schema_version: 30
tags: []
title: title
""")
YAML_CONTENT_V27_WITHOUT_IMAGE_CAPTION = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 27
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: <p><oppia-noninteractive-image filepath-with-value="&quot;random.png&quot;"></oppia-noninteractive-image>Hello this
is test case to check image tag inside p tag</p>
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 22
tags: []
title: Title
""")
YAML_CONTENT_V35_WITH_IMAGE_CAPTION = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: <oppia-noninteractive-image caption-with-value="&quot;&quot;"
filepath-with-value="&quot;random_height_490_width_120.png&quot;"></oppia-noninteractive-image><p>Hello
this is test case to check image tag inside p tag</p>
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 30
tags: []
title: Title
""")
# pylint: enable=line-too-long
def test_load_from_v26_textangular(self):
"""Test direct loading from a v26 yaml file."""
mock_get_filename_with_dimensions_context = self.swap(
html_validation_service, 'get_filename_with_dimensions',
mock_get_filename_with_dimensions)
with mock_get_filename_with_dimensions_context:
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V26_TEXTANGULAR)
self.assertEqual(
exploration.to_yaml(), self.YAML_CONTENT_V35_IMAGE_DIMENSIONS)
def test_load_from_v27_without_image_caption(self):
"""Test direct loading from a v27 yaml file."""
mock_get_filename_with_dimensions_context = self.swap(
html_validation_service, 'get_filename_with_dimensions',
mock_get_filename_with_dimensions)
with mock_get_filename_with_dimensions_context:
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V27_WITHOUT_IMAGE_CAPTION)
self.assertEqual(
exploration.to_yaml(), self.YAML_CONTENT_V35_WITH_IMAGE_CAPTION)
class ConversionUnitTests(test_utils.GenericTestBase):
"""Test conversion methods."""
def test_convert_exploration_to_player_dict(self):
exp_title = 'Title'
second_state_name = 'first state'
exploration = exp_domain.Exploration.create_default_exploration(
'eid', title=exp_title, category='Category')
exploration.add_states([second_state_name])
def _get_default_state_dict(content_str, dest_name):
"""Gets the default state dict of the exploration."""
return {
'classifier_model_id': None,
'content': {
'content_id': 'content',
'html': content_str,
},
'recorded_voiceovers': {
'voiceovers_mapping': {
'content': {},
'default_outcome': {}
}
},
'solicit_answer_details': False,
'written_translations': {
'translations_mapping': {
'content': {},
'default_outcome': {}
}
},
'interaction': {
'answer_groups': [],
'confirmed_unclassified_answers': [],
'customization_args': {},
'default_outcome': {
'dest': dest_name,
'feedback': {
'content_id': feconf.DEFAULT_OUTCOME_CONTENT_ID,
'html': ''
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'hints': [],
'id': None,
'solution': None,
},
'param_changes': [],
}
self.assertEqual(exploration.to_player_dict(), {
'init_state_name': feconf.DEFAULT_INIT_STATE_NAME,
'title': exp_title,
'objective': feconf.DEFAULT_EXPLORATION_OBJECTIVE,
'states': {
feconf.DEFAULT_INIT_STATE_NAME: _get_default_state_dict(
feconf.DEFAULT_INIT_STATE_CONTENT_STR,
feconf.DEFAULT_INIT_STATE_NAME),
second_state_name: _get_default_state_dict(
'', second_state_name),
},
'param_changes': [],
'param_specs': {},
'language_code': 'en',
'correctness_feedback_enabled': False,
})
class StateOperationsUnitTests(test_utils.GenericTestBase):
"""Test methods operating on states."""
def test_delete_state(self):
"""Test deletion of states."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.add_states(['first state'])
with self.assertRaisesRegexp(
ValueError, 'Cannot delete initial state'
):
exploration.delete_state(exploration.init_state_name)
exploration.add_states(['second state'])
exploration.delete_state('second state')
with self.assertRaisesRegexp(ValueError, 'fake state does not exist'):
exploration.delete_state('fake state')
class HtmlCollectionTests(test_utils.GenericTestBase):
"""Test method to obtain all html strings."""
def test_all_html_strings_are_collected(self):
exploration = exp_domain.Exploration.create_default_exploration(
'eid', title='title', category='category')
exploration.add_states(['state1', 'state2', 'state3', 'state4'])
state1 = exploration.states['state1']
state2 = exploration.states['state2']
state3 = exploration.states['state3']
state4 = exploration.states['state4']
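# Give each state a different interaction so HTML is collected from content,
# outcomes, hints, a solution and customization args alike.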
content1_dict = {
'content_id': 'content',
'html': '<blockquote>Hello, this is state1</blockquote>'
}
content2_dict = {
'content_id': 'content',
'html': '<pre>Hello, this is state2</pre>'
}
content3_dict = {
'content_id': 'content',
'html': '<p>Hello, this is state3</p>'
}
content4_dict = {
'content_id': 'content',
'html': '<p>Hello, this is state4</p>'
}
state1.update_content(
state_domain.SubtitledHtml.from_dict(content1_dict))
state2.update_content(
state_domain.SubtitledHtml.from_dict(content2_dict))
state3.update_content(
state_domain.SubtitledHtml.from_dict(content3_dict))
state4.update_content(
state_domain.SubtitledHtml.from_dict(content4_dict))
state1.update_interaction_id('TextInput')
state2.update_interaction_id('MultipleChoiceInput')
state3.update_interaction_id('ItemSelectionInput')
state4.update_interaction_id('DragAndDropSortInput')
customization_args_dict1 = {
'placeholder': {'value': ''},
'rows': {'value': 1}
}
customization_args_dict2 = {
'choices': {'value': [
'<p>This is value1 for MultipleChoice</p>',
'<p>This is value2 for MultipleChoice</p>'
]}
}
customization_args_dict3 = {
'choices': {'value': [
'<p>This is value1 for ItemSelection</p>',
'<p>This is value2 for ItemSelection</p>',
'<p>This is value3 for ItemSelection</p>'
]}
}
customization_args_dict4 = {
'choices': {'value': [
'<p>This is value1 for DragAndDropSortInput</p>',
'<p>This is value2 for DragAndDropSortInput</p>',
]}
}
state1.update_interaction_customization_args(customization_args_dict1)
state2.update_interaction_customization_args(customization_args_dict2)
state3.update_interaction_customization_args(customization_args_dict3)
state4.update_interaction_customization_args(customization_args_dict4)
default_outcome_dict1 = {
'dest': 'state2',
'feedback': {
'content_id': 'default_outcome',
'html': '<p>Default outcome for state1</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
}
state1.update_interaction_default_outcome(default_outcome_dict1)
hint_list2 = [{
'hint_content': {
'content_id': 'hint_1',
'html': '<p>Hello, this is html1 for state2</p>'
}
}, {
'hint_content': {
'content_id': 'hint_2',
'html': '<p>Hello, this is html2 for state2</p>'
}
}]
state2.update_interaction_hints(hint_list2)
solution_dict1 = {
'interaction_id': '',
'answer_is_exclusive': True,
'correct_answer': 'Answer1',
'explanation': {
'content_id': 'solution',
'html': '<p>This is solution for state1</p>'
}
}
state1.update_interaction_solution(solution_dict1)
answer_group_list2 = [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': 0}
}, {
'rule_type': 'Equals',
'inputs': {'x': 1}
}],
'outcome': {
'dest': 'state1',
'feedback': {
'content_id': 'feedback_1',
'html': '<p>Outcome1 for state2</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}, {
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': 0}
}],
'outcome': {
'dest': 'state3',
'feedback': {
'content_id': 'feedback_2',
'html': '<p>Outcome2 for state2</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
answer_group_list3 = [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value1 for ItemSelectionInput</p>'
]}
}, {
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value3 for ItemSelectionInput</p>'
]}
}],
'outcome': {
'dest': 'state1',
'feedback': {
'content_id': 'feedback_1',
'html': '<p>Outcome for state3</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
state2.update_interaction_answer_groups(answer_group_list2)
state3.update_interaction_answer_groups(answer_group_list3)
expected_html_list = [
'',
'',
'<pre>Hello, this is state2</pre>',
'<p>Outcome1 for state2</p>',
'<p>Outcome2 for state2</p>',
'',
'<p>Hello, this is html1 for state2</p>',
'<p>Hello, this is html2 for state2</p>',
'<p>This is value1 for MultipleChoice</p>',
'<p>This is value2 for MultipleChoice</p>',
'<blockquote>Hello, this is state1</blockquote>',
'<p>Default outcome for state1</p>',
'<p>This is solution for state1</p>',
'<p>Hello, this is state3</p>',
'<p>Outcome for state3</p>',
'<p>This is value1 for ItemSelectionInput</p>',
'<p>This is value3 for ItemSelectionInput</p>',
'',
'<p>This is value1 for ItemSelection</p>',
'<p>This is value2 for ItemSelection</p>',
'<p>This is value3 for ItemSelection</p>',
'<p>Hello, this is state4</p>',
'',
'<p>This is value1 for DragAndDropSortInput</p>',
'<p>This is value2 for DragAndDropSortInput</p>'
]
actual_outcome_list = exploration.get_all_html_content_strings()
self.assertEqual(actual_outcome_list, expected_html_list)
|
import pytest
import os
from collections import defaultdict
from syrupy.extensions.single_file import SingleFileSnapshotExtension
from tempgen.module import Tempgen
from tempgen.parsers import Parsers
from tempgen.transforms import Transforms
from tempgen.tests.helpers import ext_serializer_map
tests_dir = os.path.dirname(os.path.abspath(__file__))
fixture_dir = os.path.join(tests_dir, 'fixtures')
fixture_name = 'test_template'
generated_name = 'generated'
transforms = Transforms().name_transform_map.keys()
extensions = Parsers().ext_parser_map.keys()
serializers = ext_serializer_map
@pytest.fixture(autouse=True)
def tempgen_instance():
return Tempgen()
@pytest.fixture
def tempgen_instances(request):
return [Tempgen() for _ in range(request.param)]
@pytest.mark.parametrize('extension', extensions)
def test_load_template(extension, tempgen_instance, snapshot):
template = os.path.join(fixture_dir, fixture_name + extension)
tempgen_instance.load_template(template)
assert template in tempgen_instance.get_templates()
assert tempgen_instance.get_fields() == snapshot
@pytest.mark.parametrize('extension', extensions)
def test_save_result(extension, tempgen_instance, snapshot):
template = os.path.join(fixture_dir, fixture_name + extension)
tempgen_instance.load_template(template)
replacements = { key: value['value'] for key, value in tempgen_instance.get_fields().items() }
replacements['doer'] = 'Петров П.П.'
replacements['itn'] = '987654321098'
save_path = os.path.join(fixture_dir, generated_name)
tempgen_instance.save_result(template, save_path, replacements)
assert ext_serializer_map[extension](save_path + extension) == snapshot
os.remove(save_path + extension)
@pytest.mark.parametrize('extension', extensions)
@pytest.mark.parametrize('transform', transforms)
@pytest.mark.parametrize('tempgen_instances', [2], indirect=['tempgen_instances'])
def test_independence(extension, transform, tempgen_instances):
instance_0, instance_1 = tempgen_instances
assert instance_0.parsers != instance_1.parsers
assert instance_0.transforms != instance_1.transforms
instance_0.parsers[extension].parse = lambda *args, **kwargs: ({})
instance_0.transforms[transform] = lambda x: x
assert instance_0.parsers != instance_1.parsers
assert instance_0.transforms != instance_1.transforms
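# A minimal usage sketch outside the test suite (the '.docx' extension below is
# illustrative; any key of Parsers().ext_parser_map would work): load a
# template, fill each parsed field with its default value, and save a copy.
if __name__ == '__main__':
    template = os.path.join(fixture_dir, fixture_name + '.docx')
    t = Tempgen()
    t.load_template(template)
    replacements = {key: field['value'] for key, field in t.get_fields().items()}
    t.save_result(template, os.path.join(fixture_dir, 'demo_out'), replacements)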
|
#!/usr/bin/python3
"""Create class"""
class Square:
"""Square class"""
    def __init__(self, size=0, position=(0, 0)):
        """Initialize Square, validating via the property setters"""
        self.size = size
        self.position = position
    def area(self):
        """Return the current square area"""
        return self.__size * self.__size
@property
def size(self):
""" define size"""
return self.__size
@size.setter
def size(self, value):
"""Define area"""
if type(value) is not int:
raise TypeError("size must be an integer")
if value < 0:
raise ValueError("size must be >= 0")
self.__size = value
def my_print(self):
"""print Square"""
if self.__size:
for i in range(self.__position[1]):
print()
for j in range(self.__size):
print('{}{}'.format(' ' * self.position[0], '#' * self.__size))
else:
print()
@property
def position(self):
""" position"""
return self.__position
@position.setter
def position(self, value):
if (not isinstance(value, tuple) or
len(value) != 2 or
not isinstance(value[0], int) or
not isinstance(value[1], int) or
value[0] < 0 or
value[1] < 0):
raise TypeError("position must be a tuple of 2 positive integers")
self.__position = value
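# A minimal usage sketch (not part of the original exercise; the values are
# arbitrary): exercises the validated setters, area() and my_print().
if __name__ == "__main__":
    sq = Square(3, (1, 1))
    print(sq.area())
    sq.my_print()
    sq.size = 5
    sq.my_print()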
|
#This is a mixin class for signal analyzers to be used by f2_system class
#Todo: Analyzers should be an independent class
#Last modification by Marko Kosunen, marko.kosunen@aalto.fi, 30.07.2018 18:09
import numpy as np
import scipy.signal as sig
import matplotlib as mpl
mpl.use('Agg') #To enable plotting without X
import matplotlib.pyplot as plt
class analyzers_mixin:
#Define signal analyzer methods
def oscilloscope(self,argdict):
ymax=argdict['ymax']
ymin=argdict['ymin']
timex=argdict['timex']
sigin=argdict['sigin']
tstr=argdict['tstr']
printstr=argdict['printstr']
msg="Generating %s" %(printstr)
self.print_log(type='I', msg=msg)
figure=plt.figure()
h=plt.subplot();
hfont = {'fontname':'Sans'}
plt.plot(timex, sigin, linewidth=2)
plt.ylim((ymin, ymax));
plt.xlim((np.amin(timex), np.amax(timex)));
#plt.plot(self.x,self.a,label='Blowing in the wind',linewidth=2);
#plt.plot(self.x,self.b,label='Blowing in the wind',linewidth=2);
plt.suptitle(tstr,fontsize=20);
plt.ylabel('Out', **hfont,fontsize=18);
plt.xlabel('Sample (n)', **hfont,fontsize=18);
h.tick_params(labelsize=14)
plt.grid(True);
figure.savefig(printstr, format='eps', dpi=300);
plt.close("all")
def constellation(self,argdict):
ymax=argdict['ymax']
ymin=argdict['ymin']
I=argdict['I']
Q=argdict['Q']
tstr=argdict['tstr']
printstr=argdict['printstr']
msg="Generating %s" %(printstr)
self.print_log(type='I', msg=msg)
figure=plt.figure()
h=plt.subplot();
hfont = {'fontname':'Sans'}
plt.plot(I, Q, linestyle='None', marker='x')
plt.ylim((ymin, ymax));
plt.ylim((1.1*np.amin(Q), 1.1*np.amax(Q)));
plt.xlim((1.1*np.amin(I), 1.1*np.amax(I)));
#plt.plot(self.x,self.a,label='Blowing in the wind',linewidth=2);
#plt.plot(self.x,self.b,label='Blowing in the wind',linewidth=2);
plt.suptitle(tstr,fontsize=20);
plt.ylabel('Q', **hfont,fontsize=18);
plt.xlabel('I', **hfont,fontsize=18);
h.tick_params(labelsize=14)
plt.grid(True);
figure.savefig(printstr, format='eps', dpi=300);
plt.close("all")
def spectrum_analyzer(self, **kwargs):
        #Example kwargs
        #kwargs={'sigin':self.signal_gen._Z.Data[i,:,0],'ymax':3, 'ymin':spectrumfloorideal,'nperseg':1024,
        #        'tstr' : "Tx, User:%i" %(i),'printstr':"%s/F2_system_Tx_antennas_Spectrum_Rs_%i_k:%i.eps" %(self.picpath, self.Rs, i)}
ymax=kwargs.get('ymax',3)
ymin=kwargs.get('ymin',-80)
        nperseg=kwargs.get('nperseg',1024) #Samples per segment for the Welch periodogram
fs=kwargs.get('Rs',self.Rs)
freqx=np.arange(nperseg)/nperseg*fs/1e6
freqx.shape=(-1,1)
sigin=kwargs['sigin']
sigin.shape=(-1,1)
tstr=kwargs['tstr']
printstr=kwargs['printstr']
msg="Generating %s" %(printstr)
self.print_log(type='I', msg=msg)
figure=plt.figure()
h=plt.subplot();
hfont = {'fontname':'Sans'}
        freqs, spe=sig.welch(sigin,fs=fs,nperseg=nperseg,return_onesided=False,scaling='spectrum',axis=0) #Use the (possibly overridden) sample rate
spelog=10*np.log10(np.abs(spe)/np.amax(np.abs(spe)))
plt.plot(freqx,spelog, linewidth=2 )
#plt.setp(markerline,'markerfacecolor', 'b','linewidth',2)
#plt.setp(stemlines, 'linestyle','solid','color','b', 'linewidth', 2)
#plt.ylim((np.amin([self.a,self.b]), np.amax([self.a,self.b])));
plt.ylim((ymin, ymax));
plt.xlim((np.amin(freqx), np.amax(freqx)));
#plt.plot(self.x,self.a,label='Blowing in the wind',linewidth=2);
#plt.plot(self.x,self.b,label='Blowing in the wind',linewidth=2);
plt.suptitle(tstr,fontsize=20);
plt.ylabel('Normalized Spectrum', **hfont,fontsize=18);
plt.xlabel('Frequency (MHz)', **hfont,fontsize=18);
h.tick_params(labelsize=14)
#for axis in ['top','bottom','left','right']:
#h.spines[axis].set_linewidth(2)
#lgd=plt.legend(loc='upper right', fontsize=14);
##lgd.set_fontsize(12);
plt.grid(True);
figure.savefig(printstr, format='eps', dpi=300);
plt.close("all")
def logic_analyzer(self,argdict):
ymax=argdict['ymax']
ymin=argdict['ymin']
timex=argdict['timex']
sigin=argdict['sigin']
tstr = argdict['tstr']
printstr=argdict['printstr']
msg="Generating %s" %(printstr)
self.print_log(type='I', msg=msg)
figure=plt.figure()
h=plt.subplot();
hfont = {'fontname':'Sans'}
markerline, stemlines, baseline = plt.stem(timex, sigin, '-.')
plt.setp(markerline,'markerfacecolor', 'b','linewidth',2)
plt.setp(stemlines, 'linestyle','solid','color','b', 'linewidth', 2)
plt.ylim((ymin, ymax));
plt.xlim((np.amin(timex), np.amax(timex)));
plt.suptitle(tstr,fontsize=20);
plt.ylabel('Out', **hfont,fontsize=18);
plt.xlabel('Sample (n)', **hfont,fontsize=18);
h.tick_params(labelsize=14)
plt.grid(True);
        figure.savefig(printstr, format='eps', dpi=300);
        plt.close("all")
def evm_calculator(self,argdict):
reference=argdict['ref']
received=argdict['signal']
        #Shape the vectors: time is row, observation is column
#if received.shape[0]<received.shape[1]:
# received=np.transpose(received)
reference.shape=(-1,1)
received.shape=(-1,1)
#RMS for Scaling
rmsref=np.std(reference)
rmsreceived=np.std(received)
EVM=10*np.log10(np.mean(np.mean(np.abs(received/rmsreceived*rmsref-reference)**2,axis=0)/np.mean(np.abs(reference)**2,axis=0)))
self.print_log(type='I', msg="Estimated EVM is %0.2f dB" %(EVM))
return EVM
def ber_calculator(self,argdict):
reference=argdict['ref']
received=argdict['signal']
        #Shape the vectors: time is row, observation is column
#if received.shape[0]<received.shape[1]:
# received=np.transpose(received)
#reference.shape=received.shape
reference.shape=(-1,1)
received.shape=(-1,1)
#Discard samples rounded away in transmission
#if received.shape[1] < reference.shape[1]:
# reference=reference[:,0:received.shape[1]]
        errors=np.sum(np.sum(np.abs(received-reference),axis=0))
bits=(received.shape[0]*received.shape[1])
self.print_log(type='I', msg="Received %i errors in %i bits" %(int(errors), int(bits)))
BER=errors/bits
self.print_log(type='I', msg="Resulting BER is %0.3g" %(BER))
return BER
#From Kosta.
def plot_generic(x, y_list, title_str, legend_list, xlabel_str, ylabel_str, xscale, yscale, plot_style_str='o-', xlim=[], ylim=[], linewidth=2, fontsize=14):
    scale_to_plot_func = {
        ('linear', 'linear'): 'plot',
        ('log', 'linear'): 'semilogx',
        ('linear', 'log'): 'semilogy',
        ('log', 'log'): 'loglog',
    }
    if (xscale, yscale) not in scale_to_plot_func:
        raise Exception('xscale = %s, yscale = %s, both should be linear or log!!' % (xscale, yscale))
    fig, ax = plt.subplots() # default is 1,1,1
    # Resolve the axes method by name instead of building code strings for exec()
    plot_func = getattr(ax, scale_to_plot_func[(xscale, yscale)])
    if isinstance(x[0], list) and (len(x) == len(y_list)): # several plots with different x values
        for x_i, y_i in zip(x, y_list):
            plot_func(x_i, y_i, plot_style_str, linewidth=linewidth)
    elif isinstance(y_list[0], list): # several plots with the same x values
        for y_i in y_list:
            plot_func(x, y_i, plot_style_str, linewidth=linewidth)
    else: # single plot only
        plot_func(x, y_list, plot_style_str, linewidth=linewidth)
if xlim != []:
plt.xlim(xlim)
if ylim != []:
plt.ylim(ylim)
ax.set_xlabel(xlabel_str, fontsize=fontsize)
plt.ylabel(ylabel_str, fontsize=fontsize)
if title_str == []:
loc_y = 1.05
else:
plt.title(title_str, fontsize=fontsize)
loc_y = 1
if legend_list != []:
plt.legend(legend_list, loc=(0, loc_y))
plt.grid(True, which='both')
ax.tick_params(axis='both', which='major', labelsize=fontsize)
plt.show()
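# A minimal sketch of plot_generic (the data below is illustrative): two curves
# sharing one x vector, drawn with a logarithmic x axis.
if __name__ == '__main__':
    xs = list(range(1, 11))
    ys = [[v ** 2 for v in xs], [v ** 3 for v in xs]]
    plot_generic(xs, ys, 'demo', ['squares', 'cubes'], 'x', 'y', 'log', 'linear')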
|
"""
McsCMOS
~~~~~~~
Wrapper and Helper to access MCS CMOS Data within H5 Files
:copyright: (c) 2018 by Multi Channel Systems MCS GmbH
:license: see LICENSE for more details
"""
import h5py
import numpy as np
class CMOSData(h5py.File):
"""
Wrapper for a HDF5 File containing CMOS Data
"""
def __init__(self, path):
"""
Creates a CMOSData file and links it to a H5 File
:param path: Path to a H5 File containing CMOS Data
:type path: string
"""
super(CMOSData, self).__init__(path, mode='r')
# -- map raw data --
self.raw_data= self['/Data/Recording_0/FrameStream/Stream_0/FrameDataEntity_0/FrameData']
self.conv_factors= self['/Data/Recording_0/FrameStream/Stream_0/FrameDataEntity_0/ConversionFactors']
# - map proxy data -
self.conv_data = CMOSConvProxy(self)
# -- map meta --
self.meta={}
# - from InfoFrame
info_frame= self['/Data/Recording_0/FrameStream/Stream_0/InfoFrame']
for key in info_frame.dtype.names:
if hasattr(info_frame[key][0], "decode"):
self.meta[key]=info_frame[key][0].decode('utf-8')
else:
self.meta[key]=info_frame[key][0]
if("Tick" in self.meta):
self.meta["FrameRate"] = 10.0**6/self.meta["Tick"]
# - from File
for key, value in self.attrs.items():
if hasattr(value, "decode"):
self.meta[key]= value.decode('utf-8')
else:
self.meta[key]= value
# - from Data Group
for key, value in self['/Data'].attrs.items():
if hasattr(value, "decode"):
self.meta[key]= value.decode('utf-8')
else:
self.meta[key]= value
# -- map events --
if("EventStream" in self["Data/Recording_0/"].keys()):
event_group = self["Data/Recording_0/EventStream/Stream_0/"]
event_info = self["Data/Recording_0/EventStream/Stream_0/InfoEvent"]
self.events={}
self.event_frames={}
for key in event_group.keys():
if "EventEntity" in key:
info = event_info["Label"][event_info["EventID"]==int(key.split("_")[1])][0]
self.events[info] = event_group[key][0, 0]
self.event_frames[info] = event_group[key][0, 0]/self.meta["Tick"]
class CMOSConvProxy:
"""
Private Class, should be embedded within a CMOSData Object.
A proxy that transparently converts raw data to calibrated data.
"""
def __init__(self, parent):
"""
Creates a new CMOSConvProxy
:param parent: Object that can provide raw_data and conv_factors
:type parent: CMOSData
"""
self._parent = parent
self.dtype = np.int32
def __getitem__(self, slices):
"""
Sliced access to converted data
:param slices: Data-slices to retrieve
:returns: converted data
"""
raw_data = self._parent.raw_data.__getitem__(slices)
        conv_factors = self._parent.conv_factors.__getitem__((slices[0], slices[1]))
        return (raw_data*conv_factors).astype(self.dtype)
@property
def shape(self):
"""
Shape of the data
"""
return self._parent.raw_data.shape
class CMOSSpikes(h5py.File):
"""
Wrapper for a HDF5 File containing CMOS Spike Data.
    Spike information is accessible through the .spikes member,
    waveform information (if available) through the .waveforms member.
"""
def __init__(self, path):
        super(CMOSSpikes, self).__init__(path, mode='r')  # open read-only, like CMOSData
# -- Check for right structure --
if("data" in self.keys() and "spikes" in self['data'].keys()):
# -- Map Spike-Data to RecordArray
self.spikes = np.core.records.fromarrays(self['data/spikes'][:,:],
names='time, col, row',
formats = 'int64, int64, int64')
# -- Map Waveforms to Array
if("waveforms" in self['data'].keys()):
self.waveforms = self['data/waveforms'][:,:].transpose()
else:
raise IOError(path+ " has no valid CMOSSpikeFile Structure")
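# A minimal usage sketch (the file name below is illustrative): open a
# recording read-only, inspect the mapped metadata, and pull calibrated
# frames through the conversion proxy.
if __name__ == "__main__":
    data = CMOSData("./recording.h5")
    print(data.meta.get("FrameRate"))
    frames = data.conv_data[0:10, :, :]  # raw counts * conversion factors, as int32
    print(frames.shape)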
|
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='namelengthsrc',
parent_name='bar.hoverlabel',
**kwargs
):
super(NamelengthsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'info'),
**kwargs
)
|
from rest_framework_extensions.key_constructor.constructors import DefaultKeyConstructor
from rest_framework_extensions.key_constructor.constructors import bits
class UserKeyConstructor(DefaultKeyConstructor):
user = bits.UserKeyBit()
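# A minimal sketch (assumption, not from the original module) of putting the
# constructor to work: keying a cached DRF endpoint per authenticated user.
# The view name and response body are illustrative.
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_extensions.cache.decorators import cache_response
class UserProfileView(APIView):
    @cache_response(60, key_func=UserKeyConstructor())
    def get(self, request, *args, **kwargs):
        return Response({'username': request.user.username})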
|
import numpy as np
from PIL import Image
import sys
def numpy_to_png(source, dest):
image = Image.fromarray(np.load(source))
image.save(dest,"PNG")
if __name__ == "__main__":
source = sys.argv[1]
dest = source.split('.npy')[0] + '.png'
print(source, " to ", dest)
numpy_to_png(source, dest)
|
# -*- coding: utf-8 -*-
from codecs import open
import logging
import colander
import deform
from itsdangerous import BadData, SignatureExpired
from h import i18n, models, validators
from h.accounts import util
from h.services.user import UserNotActivated
from h.models.user import (
EMAIL_MAX_LENGTH,
USERNAME_MAX_LENGTH,
USERNAME_MIN_LENGTH,
USERNAME_PATTERN,
)
from h.schemas.base import CSRFSchema, JSONSchema
_ = i18n.TranslationString
log = logging.getLogger(__name__)
PASSWORD_MIN_LENGTH = 2 # FIXME: this is ridiculous
USERNAME_BLACKLIST = None
def get_blacklist():
global USERNAME_BLACKLIST
if USERNAME_BLACKLIST is None:
# Try to load the blacklist file from disk. If, for whatever reason, we
# can't load the file, then don't crash out, just log a warning about
# the problem.
try:
with open('h/accounts/blacklist', encoding='utf-8') as fp:
blacklist = fp.readlines()
except (IOError, ValueError):
log.exception('unable to load blacklist')
blacklist = []
USERNAME_BLACKLIST = set(l.strip().lower() for l in blacklist)
return USERNAME_BLACKLIST
def unique_email(node, value):
'''Colander validator that ensures no user with this email exists.'''
request = node.bindings['request']
user = models.User.get_by_email(request.db, value, request.authority)
if user and user.userid != request.authenticated_userid:
msg = _("Sorry, an account with this email address already exists.")
raise colander.Invalid(node, msg)
def unique_username(node, value):
'''Colander validator that ensures the username does not exist.'''
request = node.bindings['request']
user = models.User.get_by_username(request.db, value, request.authority)
if user:
msg = _("This username is already taken.")
raise colander.Invalid(node, msg)
def email_node(**kwargs):
"""Return a Colander schema node for a new user email."""
return colander.SchemaNode(
colander.String(),
validator=colander.All(
validators.Length(max=EMAIL_MAX_LENGTH),
validators.Email(),
unique_email,
),
widget=deform.widget.TextInputWidget(template='emailinput'),
**kwargs)
def unblacklisted_username(node, value, blacklist=None):
'''Colander validator that ensures the username is not blacklisted.'''
if blacklist is None:
blacklist = get_blacklist()
if value.lower() in blacklist:
# We raise a generic "user with this name already exists" error so as
# not to make explicit the presence of a blacklist.
msg = _("Sorry, an account with this username already exists. "
"Please enter another one.")
raise colander.Invalid(node, msg)
def password_node(**kwargs):
"""Return a Colander schema node for an existing user password."""
kwargs.setdefault('widget', deform.widget.PasswordWidget())
return colander.SchemaNode(
colander.String(),
**kwargs)
def new_password_node(**kwargs):
"""Return a Colander schema node for a new user password."""
kwargs.setdefault('widget', deform.widget.PasswordWidget())
return colander.SchemaNode(
colander.String(),
validator=validators.Length(min=PASSWORD_MIN_LENGTH),
**kwargs)
class LoginSchema(CSRFSchema):
username = colander.SchemaNode(
colander.String(),
title=_('Username / email'),
widget=deform.widget.TextInputWidget(autofocus=True),
)
password = colander.SchemaNode(
colander.String(),
title=_('Password'),
widget=deform.widget.PasswordWidget()
)
def validator(self, node, value):
super(LoginSchema, self).validator(node, value)
request = node.bindings['request']
username = value.get('username')
password = value.get('password')
user_service = request.find_service(name='user')
user_password_service = request.find_service(name='user_password')
try:
user = user_service.fetch_for_login(username_or_email=username)
except UserNotActivated:
err = colander.Invalid(node)
err['username'] = _("Please check your email and open the link "
"to activate your account.")
raise err
if user is None:
err = colander.Invalid(node)
err['username'] = _('User does not exist.')
raise err
if not user_password_service.check_password(user, password):
err = colander.Invalid(node)
err['password'] = _('Wrong password.')
raise err
value['user'] = user
class ForgotPasswordSchema(CSRFSchema):
email = colander.SchemaNode(
colander.String(),
validator=colander.All(validators.Email()),
title=_('Email address'),
widget=deform.widget.TextInputWidget(template='emailinput',
autofocus=True),
)
def validator(self, node, value):
super(ForgotPasswordSchema, self).validator(node, value)
request = node.bindings['request']
email = value.get('email')
user = models.User.get_by_email(request.db, email, request.authority)
if user is None:
err = colander.Invalid(node)
err['email'] = _('Unknown email address.')
raise err
value['user'] = user
class RegisterSchema(CSRFSchema):
username = colander.SchemaNode(
colander.String(),
validator=colander.All(
validators.Length(min=USERNAME_MIN_LENGTH,
max=USERNAME_MAX_LENGTH),
colander.Regex(
USERNAME_PATTERN,
msg=_("Must have only letters, numbers, periods, and "
"underscores.")),
unique_username,
unblacklisted_username,
),
title=_('Username'),
hint=_('Must be between {min} and {max} characters, containing only '
'letters, numbers, periods, and underscores.').format(
min=USERNAME_MIN_LENGTH,
max=USERNAME_MAX_LENGTH
),
widget=deform.widget.TextInputWidget(autofocus=True),
)
email = email_node(title=_('Email address'))
password = new_password_node(title=_('Password'))
class ResetCode(colander.SchemaType):
"""Schema type transforming a reset code to a user and back."""
def serialize(self, node, appstruct):
if appstruct is colander.null:
return colander.null
if not isinstance(appstruct, models.User):
raise colander.Invalid(node, '%r is not a User' % appstruct)
request = node.bindings['request']
serializer = request.registry.password_reset_serializer
return serializer.dumps(appstruct.username)
def deserialize(self, node, cstruct):
if cstruct is colander.null:
return colander.null
request = node.bindings['request']
serializer = request.registry.password_reset_serializer
try:
(username, timestamp) = serializer.loads(cstruct,
max_age=72*3600,
return_timestamp=True)
except SignatureExpired:
raise colander.Invalid(node, _('Reset code has expired. Please reset your password again'))
except BadData:
raise colander.Invalid(node, _('Wrong reset code.'))
user = models.User.get_by_username(request.db, username, request.authority)
if user is None:
raise colander.Invalid(node, _('Your reset code is not valid'))
if user.password_updated is not None and timestamp < user.password_updated:
raise colander.Invalid(node,
_('This reset code has already been used.'))
return user
class ResetPasswordSchema(CSRFSchema):
# N.B. this is the field into which the user puts their reset code, but we
# call it `user` because when validated, it will return a `User` object.
user = colander.SchemaNode(
ResetCode(),
title=_('Reset code'),
hint=_('This will be emailed to you.'),
widget=deform.widget.TextInputWidget(disable_autocomplete=True))
password = new_password_node(
title=_('New password'),
widget=deform.widget.PasswordWidget(disable_autocomplete=True))
class EmailChangeSchema(CSRFSchema):
email = email_node(title=_('Email address'))
# No validators: all validation is done on the email field
password = password_node(title=_('Confirm password'),
hide_until_form_active=True)
def validator(self, node, value):
super(EmailChangeSchema, self).validator(node, value)
exc = colander.Invalid(node)
request = node.bindings['request']
svc = request.find_service(name='user_password')
user = request.user
if not svc.check_password(user, value.get('password')):
exc['password'] = _('Wrong password.')
if exc.children:
raise exc
class PasswordChangeSchema(CSRFSchema):
password = password_node(title=_('Current password'),
inactive_label=_('Password'))
new_password = password_node(title=_('New password'),
hide_until_form_active=True)
# No validators: all validation is done on the new_password field and we
# merely assert that the confirmation field is the same.
new_password_confirm = colander.SchemaNode(
colander.String(),
title=_('Confirm new password'),
widget=deform.widget.PasswordWidget(),
hide_until_form_active=True)
def validator(self, node, value):
super(PasswordChangeSchema, self).validator(node, value)
exc = colander.Invalid(node)
request = node.bindings['request']
svc = request.find_service(name='user_password')
user = request.user
if value.get('new_password') != value.get('new_password_confirm'):
exc['new_password_confirm'] = _('The passwords must match')
if not svc.check_password(user, value.get('password')):
exc['password'] = _('Wrong password.')
if exc.children:
raise exc
def validate_url(node, cstruct):
try:
util.validate_url(cstruct)
except ValueError as exc:
raise colander.Invalid(node, str(exc))
def validate_orcid(node, cstruct):
try:
util.validate_orcid(cstruct)
except ValueError as exc:
raise colander.Invalid(node, str(exc))
class EditProfileSchema(CSRFSchema):
display_name = colander.SchemaNode(
colander.String(),
missing=None,
validator=validators.Length(max=30),
title=_('Display name'))
description = colander.SchemaNode(
colander.String(),
missing=None,
validator=validators.Length(max=250),
widget=deform.widget.TextAreaWidget(
max_length=250,
rows=4,
),
title=_('Description'))
location = colander.SchemaNode(
colander.String(),
missing=None,
validator=validators.Length(max=100),
title=_('Location'))
link = colander.SchemaNode(
colander.String(),
missing=None,
validator=colander.All(
validators.Length(max=250),
validate_url),
title=_('Link'))
orcid = colander.SchemaNode(
colander.String(),
missing=None,
validator=validate_orcid,
title=_('ORCID'),
hint=_('ORCID provides a persistent identifier for researchers (see orcid.org).'))
class NotificationsSchema(CSRFSchema):
types = (('reply', _('Email me when someone replies to one of my annotations.'),),)
notifications = colander.SchemaNode(
colander.Set(),
widget=deform.widget.CheckboxChoiceWidget(
omit_label=True,
values=types),
)
class CreateUserAPISchema(JSONSchema):
"""Validate a user JSON object."""
schema = {
'type': 'object',
'properties': {
'authority': {
'type': 'string',
'format': 'hostname',
},
'username': {
'type': 'string',
'minLength': 3,
'maxLength': 30,
'pattern': '^[A-Za-z0-9._]+$',
},
'email': {
'type': 'string',
'format': 'email',
},
},
'required': [
'authority',
'username',
'email',
],
}
def includeme(config):
pass
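# A minimal sketch (assumption, not part of this module) of the serializer
# object that ResetCode expects at request.registry.password_reset_serializer:
# an itsdangerous timed serializer keyed with the application secret, whose
# dumps()/loads(max_age=..., return_timestamp=True) match the calls above.
from itsdangerous import URLSafeTimedSerializer
def make_password_reset_serializer(secret):
    """Build a serializer compatible with ResetCode.serialize()/deserialize()."""
    return URLSafeTimedSerializer(secret)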
|
# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import os
import re
import unittest
from webkitpy.common.host import Host
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
from webkitpy.w3c.test_converter import _W3CTestConverter
DUMMY_FILENAME = 'dummy.html'
DUMMY_PATH = 'dummy/testharness/path'
class W3CTestConverterTest(unittest.TestCase):
# FIXME: When we move to using a MockHost, this method should be removed, since
# then we can just pass in a dummy dir path
def fake_dir_path(self, dirname):
filesystem = Host().filesystem
webkit_root = WebKitFinder(filesystem).webkit_base()
return filesystem.abspath(filesystem.join(webkit_root, "LayoutTests", "css", dirname))
def test_read_prefixed_property_list(self):
""" Tests that the current list of properties requiring the -webkit- prefix load correctly """
# FIXME: We should be passing in a MockHost here ...
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, None)
prop_list = converter.prefixed_properties
self.assertTrue(prop_list, 'No prefixed properties found')
def test_convert_for_webkit_nothing_to_convert(self):
""" Tests convert_for_webkit() using a basic test that has nothing to convert """
test_html = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>CSS Test: DESCRIPTION OF TEST</title>
<link rel="author" title="NAME_OF_AUTHOR"
href="mailto:EMAIL OR http://CONTACT_PAGE"/>
<link rel="help" href="RELEVANT_SPEC_SECTION"/>
<meta name="assert" content="TEST ASSERTION"/>
<style type="text/css"><![CDATA[
CSS FOR TEST
]]></style>
</head>
<body>
CONTENT OF TEST
</body>
</html>
"""
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, None)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_html)
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_no_conversion_happened(converted, test_html)
def test_convert_for_webkit_harness_only(self):
""" Tests convert_for_webkit() using a basic JS test that uses testharness.js only and has no prefixed properties """
test_html = """<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
</head>
"""
fake_dir_path = self.fake_dir_path("harnessonly")
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
converter.feed(test_html)
converter.close()
converted = converter.output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
self.verify_prefixed_properties(converted, [])
def test_convert_for_webkit_properties_only(self):
""" Tests convert_for_webkit() using a test that has 2 prefixed properties: 1 in a style block + 1 inline style """
test_html = """<html>
<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<style type="text/css">
#block1 { @test0@: propvalue; }
</style>
</head>
<body>
<div id="elem1" style="@test1@: propvalue;"></div>
</body>
</html>
"""
fake_dir_path = self.fake_dir_path('harnessandprops')
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
test_content = self.generate_test_content(converter.prefixed_properties, 1, test_html)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_content[1])
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
self.verify_prefixed_properties(converted, test_content[0])
def test_convert_for_webkit_harness_and_properties(self):
""" Tests convert_for_webkit() using a basic JS test that uses testharness.js and testharness.css and has 4 prefixed properties: 3 in a style block + 1 inline style """
test_html = """<html>
<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<style type="text/css">
#block1 { @test0@: propvalue; }
#block2 { @test1@: propvalue; }
#block3 { @test2@: propvalue; }
</style>
</head>
<body>
<div id="elem1" style="@test3@: propvalue;"></div>
</body>
</html>
"""
fake_dir_path = self.fake_dir_path('harnessandprops')
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
oc = OutputCapture()
oc.capture_output()
try:
test_content = self.generate_test_content(converter.prefixed_properties, 2, test_html)
converter.feed(test_content[1])
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
self.verify_prefixed_properties(converted, test_content[0])
def test_convert_test_harness_paths(self):
""" Tests convert_testharness_paths() with a test that uses all three testharness files """
test_html = """<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
</head>
"""
fake_dir_path = self.fake_dir_path('testharnesspaths')
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_html)
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 2, 1)
def test_convert_vendor_prefix_js_paths(self):
test_html = """<head>
<script src="/common/vendor-prefix.js">
</head>
"""
fake_dir_path = self.fake_dir_path('adapterjspaths')
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME, None)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_html)
converter.close()
converted = converter.output()
finally:
oc.restore_output()
new_html = BeautifulSoup(converted[1])
# Verify the original paths are gone, and the new paths are present.
orig_path_pattern = re.compile('\"/common/vendor-prefix.js')
self.assertEquals(len(new_html.findAll(src=orig_path_pattern)), 0, 'vendor-prefix.js path was not converted')
resources_dir = converter.path_from_webkit_root("LayoutTests", "resources")
new_relpath = os.path.relpath(resources_dir, fake_dir_path)
relpath_pattern = re.compile(new_relpath)
self.assertEquals(len(new_html.findAll(src=relpath_pattern)), 1, 'vendor-prefix.js relative path not correct')
def test_convert_prefixed_properties(self):
""" Tests convert_prefixed_properties() file that has 20 properties requiring the -webkit- prefix:
10 in one style block + 5 in another style
block + 5 inline styles, including one with multiple prefixed properties.
The properties in the test content are in all sorts of wack formatting.
"""
test_html = """<html>
<style type="text/css"><![CDATA[
.block1 {
width: 300px;
height: 300px
}
.block2 {
@test0@: propvalue;
}
.block3{@test1@: propvalue;}
.block4 { @test2@:propvalue; }
.block5{ @test3@ :propvalue; }
#block6 { @test4@ : propvalue; }
#block7
{
@test5@: propvalue;
}
#block8 { @test6@: propvalue; }
#block9:pseudo
{
@test7@: propvalue;
@test8@: propvalue propvalue propvalue;
}
]]></style>
</head>
<body>
<div id="elem1" style="@test9@: propvalue;"></div>
<div id="elem2" style="propname: propvalue; @test10@ : propvalue; propname:propvalue;"></div>
<div id="elem2" style="@test11@: propvalue; @test12@ : propvalue; @test13@ :propvalue;"></div>
<div id="elem3" style="@test14@:propvalue"></div>
</body>
<style type="text/css"><![CDATA[
.block10{ @test15@: propvalue; }
.block11{ @test16@: propvalue; }
.block12{ @test17@: propvalue; }
#block13:pseudo
{
@test18@: propvalue;
@test19@: propvalue;
}
]]></style>
</html>
"""
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, None)
test_content = self.generate_test_content(converter.prefixed_properties, 20, test_html)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_content[1])
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_prefixed_properties(converted, test_content[0])
def test_hides_all_instructions_for_manual_testers(self):
test_html = """<body>
<h1 class="instructions">Hello manual tester!</h1>
<p class="instructions some_other_class">This is how you run this test.</p>
<p style="willbeoverwritten" class="instructions">...</p>
<doesntmatterwhichtagitis class="some_other_class instructions">...</p>
<p>Legit content may contain the instructions string</p>
</body>
"""
expected_test_html = """<body>
<h1 class="instructions" style="display:none">Hello manual tester!</h1>
<p class="instructions some_other_class" style="display:none">This is how you run this test.</p>
<p class="instructions" style="display:none">...</p>
<doesntmatterwhichtagitis class="some_other_class instructions" style="display:none">...</p>
<p>Legit content may contain the instructions string</p>
</body>
"""
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, None)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_html)
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.assertEqual(converted[1], expected_test_html)
def test_convert_attributes_if_needed(self):
""" Tests convert_attributes_if_needed() using a reference file that has some relative src paths """
test_html = """<html>
<head>
<script src="../../some-script.js"></script>
<style src="../../../some-style.css"></style>
</head>
<body>
<img src="../../../../some-image.jpg">
</body>
</html>
"""
test_reference_support_info = {'reference_relpath': '../', 'files': ['../../some-script.js', '../../../some-style.css', '../../../../some-image.jpg'], 'elements': ['script', 'style', 'img']}
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, test_reference_support_info)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_html)
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_reference_relative_paths(converted, test_reference_support_info)
def verify_conversion_happened(self, converted):
self.assertTrue(converted, "conversion didn't happen")
def verify_no_conversion_happened(self, converted, original):
self.assertEqual(converted[1], original, 'test should not have been converted')
def verify_test_harness_paths(self, converter, converted, test_path, num_src_paths, num_href_paths):
if isinstance(converted, basestring):
converted = BeautifulSoup(converted)
resources_dir = converter.path_from_webkit_root("LayoutTests", "resources")
# Verify the original paths are gone, and the new paths are present.
orig_path_pattern = re.compile('\"/resources/testharness')
self.assertEquals(len(converted.findAll(src=orig_path_pattern)), 0, 'testharness src path was not converted')
self.assertEquals(len(converted.findAll(href=orig_path_pattern)), 0, 'testharness href path was not converted')
new_relpath = os.path.relpath(resources_dir, test_path)
relpath_pattern = re.compile(new_relpath)
self.assertEquals(len(converted.findAll(src=relpath_pattern)), num_src_paths, 'testharness src relative path not correct')
self.assertEquals(len(converted.findAll(href=relpath_pattern)), num_href_paths, 'testharness href relative path not correct')
def verify_prefixed_properties(self, converted, test_properties):
self.assertEqual(len(set(converted[0])), len(set(test_properties)), 'Incorrect number of properties converted')
for test_prop in test_properties:
self.assertTrue((test_prop in converted[1]), 'Property ' + test_prop + ' not found in converted doc')
def verify_reference_relative_paths(self, converted, reference_support_info):
idx = 0
for path in reference_support_info['files']:
expected_path = re.sub(reference_support_info['reference_relpath'], '', path, 1)
element = reference_support_info['elements'][idx]
expected_tag = '<' + element + ' src=\"' + expected_path + '\">'
            self.assertTrue(expected_tag in converted[1], 'relative path ' + path + ' was not converted correctly')
idx += 1
def generate_test_content(self, full_property_list, num_test_properties, html):
"""Inserts properties requiring a -webkit- prefix into the content, replacing \'@testXX@\' with a property."""
test_properties = []
count = 0
while count < num_test_properties:
test_properties.append(full_property_list[count])
count += 1
# Replace the tokens in the testhtml with the test properties. Walk backward
# through the list to replace the double-digit tokens first
index = len(test_properties) - 1
while index >= 0:
# Use the unprefixed version
test_prop = test_properties[index].replace('-webkit-', '')
# Replace the token
html = html.replace('@test' + str(index) + '@', test_prop)
index -= 1
return (test_properties, html)
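# A minimal standalone sketch (reusing the dummy paths above) of the
# converter's feed/close/output cycle on an arbitrary snippet; output()
# returns (converted_properties, converted_text) as used by the tests.
if __name__ == '__main__':
    demo_converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME, None)
    demo_converter.feed('<html><body><p>hello</p></body></html>')
    demo_converter.close()
    converted_props, converted_text = demo_converter.output()
    print(converted_props, converted_text)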
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SecurityRulesOperations(object):
"""SecurityRulesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
security_rule_name=security_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.SecurityRule"
"""Get the specified network security rule.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecurityRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.SecurityRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
security_rule_parameters, # type: "models.SecurityRule"
**kwargs # type: Any
):
# type: (...) -> "models.SecurityRule"
cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'securityRuleName': self._serialize.url("security_rule_name", security_rule_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(security_rule_parameters, 'SecurityRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SecurityRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
security_rule_name, # type: str
security_rule_parameters, # type: "models.SecurityRule"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.SecurityRule"]
"""Creates or updates a security rule in the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:param security_rule_name: The name of the security rule.
:type security_rule_name: str
:param security_rule_parameters: Parameters supplied to the create or update network security
rule operation.
:type security_rule_parameters: ~azure.mgmt.network.v2020_03_01.models.SecurityRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either SecurityRule or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.SecurityRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_security_group_name=network_security_group_name,
security_rule_name=security_rule_name,
security_rule_parameters=security_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('SecurityRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules/{securityRuleName}'} # type: ignore
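    # Illustrative usage sketch (client wiring assumed; the SecurityRule
    # field names below are assumptions, not taken from this module):
    #
    #   poller = client.security_rules.begin_create_or_update(
    #       "my-rg", "my-nsg", "allow-ssh",
    #       {"protocol": "Tcp", "access": "Allow", "direction": "Inbound",
    #        "priority": 100, "source_address_prefix": "*",
    #        "source_port_range": "*", "destination_address_prefix": "*",
    #        "destination_port_range": "22"})
    #   rule = poller.result()  # blocks until the LRO reaches a terminal state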
def list(
self,
resource_group_name, # type: str
network_security_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.SecurityRuleListResult"]
"""Gets all security rules in a network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_security_group_name: The name of the network security group.
:type network_security_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SecurityRuleListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.SecurityRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SecurityRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SecurityRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/securityRules'} # type: ignore
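    # Illustrative iteration sketch: ItemPaged drives get_next/extract_data
    # lazily, so looping over the result fetches pages on demand
    # (client wiring assumed):
    #
    #   for rule in client.security_rules.list("my-rg", "my-nsg"):
    #       print(rule.name)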
|
"""
Open Nodes web server
Copyright (c) 2018 Opennodes / Blake Bjorn Anderson
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import gzip
import json
import os
import sys
import datetime
from io import BytesIO
from flask import Flask, render_template, request, redirect, flash, Response
from flask_sqlalchemy import SQLAlchemy
from geoip2.errors import AddressNotFoundError
from sqlalchemy import and_
from config import load_config, DefaultFlaskConfig
from crawler import init_geoip, connect
from models import *
import pandas as pd
from autodoc import Autodoc
app = Flask(__name__)
auto = Autodoc(app)
app.config.from_object(DefaultFlaskConfig())
app.config.from_object('flask_config')
db = SQLAlchemy(app)
CONF = load_config()
COUNTRY, CITY, ASN = init_geoip()
@app.route('/')
@app.route('/networks/<network_name>', methods=['GET'])
def network_dashboard(network_name=None):
    if network_name not in ("okcash", "testnet", None):
flash("Invalid network")
return redirect("/")
with open("static/network_summaries.json", 'r') as f:
summaries = json.load(f)
if network_name:
age_min = summaries[network_name]['age_min']
age_max = summaries[network_name]['age_max']
else:
age_min = min((summaries[network]['age_min'] for network in CONF['networks']))
age_max = max((summaries[network]['age_max'] for network in CONF['networks']))
return render_template("network_dashboard.html",
network=network_name,
                           include_client=False,
                           include_user_agent=network_name is not None,
                           include_network=network_name is None,
                           include_version=network_name is not None,
                           include_active=bool(CONF['export_inactive_nodes']),
age_min=age_min * 1000.0,
age_max=age_max * 1000.0)
def gzip_response(input_str, pre_compressed):
response = Response()
if not pre_compressed:
buffer = BytesIO()
gzip_file = gzip.GzipFile(mode='wb', fileobj=buffer)
gzip_file.write(input_str if isinstance(input_str, bytes) else input_str.encode())
gzip_file.close()
response.data = buffer.getvalue()
else:
response.data = input_str
response.headers['Content-Encoding'] = 'gzip'
response.headers['Vary'] = 'Accept-Encoding'
response.headers['Content-Length'] = len(response.data)
return response
@app.route('/api/get_networks', methods=['POST'])
@auto.doc()
def get_networks():
"""
Returns a list of all available network names
:return: JSON string, ex. "['okcash','testnet']"
"""
return json.dumps([x[0] for x in db.session.query(Node.network).distinct().all()])
@app.route('/api/gzip_file/<filename>', methods=['GET'])
@auto.doc()
def gzip_static_file(filename):
"""
Returns a crawl result as a gzipped response
:param filename: file_network.ext - file is 'data' or 'history', ext is either .json, .csv, .txt (data.ext returns data for all crawled networks)
:return: gzip encoded html response
"""
valid_files = ["custom.geo.json"]
for coin in ("", "_groestlcoin", "_testnet"):
for suff in ("", "_unique"):
for ext in (".csv", ".json", ".txt"):
valid_files.append("data" + coin + suff + ext)
valid_files.append("history" + coin + '.json')
if filename not in valid_files:
return redirect("/", code=404)
with open(os.path.join("static", filename), "r") as f:
return gzip_response(f.read(), False)
def deconstruct_address_string(inp):
assert isinstance(inp, str)
resp = {}
aliases = {'ok': 'okcash',
'tok': 'testnet'}
inp = inp.lower()
network = inp.split(":")[0]
if network:
inp = ":".join(inp.split(":")[1:])
network = aliases[network] if network in aliases else network
network = network if network in CONF['networks'] else None
if not network:
network = "okcash"
resp['warning'] = "Network not recognized, using OK"
if ":" in inp:
port = inp.split(":")[-1]
try:
port = int(port)
inp = ":".join(inp.split(":")[:-1])
except ValueError:
resp['warning'] = "port not recognized, using default"
port = int(CONF['networks'][network]['port'])
else:
port = int(CONF['networks'][network]['port'])
return network, inp, port, resp
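# Illustrative behaviour of deconstruct_address_string (ports depend on the
# loaded config; 6970 is assumed here for the "okcash" network):
#   "ok:127.0.0.1:6970" -> ("okcash", "127.0.0.1", 6970, {})
#   "tok:127.0.0.1"     -> ("testnet", "127.0.0.1", <testnet default port>, {})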
@app.route('/api/check_node', methods=['POST'])
@auto.doc()
def check_node():
"""
Checks the current status of a node. This is a live result, so response times will be longer - to view a saved
result see /api/check_historic_node.
:param node: connection string, e.g. ok:127.0.0.1:6970 - port is optional if it is the network default
:param to_services (integer, optional): outgoing services to broadcast, default=0
    :param from_services (integer, optional): services to advertise as our own, default=0
:param version (integer, optional): version code to broadcast, default varies by network
:param user_agent (string, optional): user agent to broadcast, default="/oknodes:0.1/"
:param height (integer, optional): block height to broadcast during handshake. default=network median
:param p2p_nodes (bool, optional): issues a getaddr call and list of connected nodes, default=False
:return: json dict {"result":{"user_agent":"/oktoshi:5.0.0.2/", "version":" .... }, "nodes":[["127.0.0.1:6970, 157532132191], ...]}
"""
dat = request.form
node = dat.get("node")
network, address, port, resp = deconstruct_address_string(node)
network_data = CONF['networks'][network]
if dat.get("height"):
network_data['height'] = dat.get("height")
else:
with open("static/network_summaries.json", 'r') as f:
network_data['height'] = int(json.load(f)[network]['med'])
network_data['protocol_version'] = dat.get("version") or network_data['protocol_version']
result = connect(network, address, port,
to_services=dat.get("to_services") or network_data['services'],
network_data=network_data,
user_agent=dat.get("user_agent") or None,
p2p_nodes=False,
explicit_p2p=dat.get("p2p_nodes") or False,
from_services=dat.get('from_services') or None,
keepalive=False)
resp['result'] = result[0]
resp['nodes'] = result[1]
resp['result'] = geocode(resp['result'])
return to_json(resp)
@app.route('/api/check_historic_node', methods=['POST', 'GET'])
@auto.doc()
def check_historic_node():
"""
    Checks the status of a node based on the last crawl result - for a live
    check see /api/check_node
:param node: connection string, e.g. ok:127.0.0.1:6970 - port is optional if it is the network default
:return: json dict {"result":{"user_agent":"/oktoshi:5.0.0.2/", "version":" .... }}
"""
if request.method == "POST":
dat = request.form
else:
dat = request.args
node = dat.get("node")
network, address, port, resp = deconstruct_address_string(node)
if network not in CONF['networks']:
return json.dumps({'error': "network not recognized"})
result = db.session.query(Node).get((network, address, port))
resp['result'] = "None" if result is None else result.to_dict()
return to_json(resp)
@app.route("/about")
def about():
return render_template("about.html")
@app.route("/api_docs")
def api_docs():
return auto.html()
@app.route('/api/get_nodes', methods=['POST'])
@auto.doc()
def get_node_list():
"""
Gets a list of all nodes visible during the past 30 days
:param network (optional): Filters the result set based on the given network
:return: json array [{"address":"127.0.0.1" ... }, {"address":"0.0.0.0", "port:6970}]
"""
q = db.session.query(Node.network, Node.address, Node.port, Node.user_agent, Node.version, Node.first_seen,
Node.last_seen, Node.last_checked, Node.country, Node.city, Node.asn, Node.aso).filter(
Node.seen)
if request.args.get("network") is not None:
network = request.args.get("network")
if network not in CONF['networks']:
return {"error": "network must be one of " + ", ".join(CONF['networks'])}
q = q.filter(Node.network == network)
return pd.read_sql(q.statement, q.session.bind).to_json(orient='records')
@app.route('/api/node_history', methods=['POST'])
@auto.doc()
def get_node_history():
"""
Returns the data associated with a node, and all crawler visitations on record
:param node: connection string, e.g. ok:127.0.0.1:6970 - port is optional if it is the network default.
:return: json dict {"node":{"user_agent":"/oktoshi/", "last_seen": ... }, "history":{"timestamp":157032190321,"height":56000, "success":1 ...}}
"""
node = request.form.get("node")
network, address, port, resp = deconstruct_address_string(node)
if network not in CONF['networks']:
return json.dumps({'error': "network not recognized"})
default_port = int(CONF['networks'][network]['port'])
try:
port = int(port) if port is not None else default_port
except ValueError:
resp['warning'] = "port not recognized, using default"
port = default_port
n = db.session.query(Node.network, Node.address, Node.port, Node.user_agent, Node.version, Node.first_seen,
Node.last_seen, Node.last_checked, Node.country, Node.city, Node.asn, Node.aso) \
.filter(and_(Node.network == network, Node.address == address, Node.port == port)).one()
q = db.session.query(NodeVisitation.timestamp, NodeVisitation.height, NodeVisitation.success) \
.join(Node, and_(Node.network == NodeVisitation.network, Node.address == NodeVisitation.address,
Node.port == NodeVisitation.port)) \
.filter(and_(Node.network == network, Node.address == address, Node.port == port)) \
.order_by(NodeVisitation.timestamp.desc())
df = pd.read_sql(q.statement, q.session.bind)
    df['timestamp'] = df['timestamp'].astype('int64') // 10 ** 9
resp.update({"node": {"network": n.network, 'address': n.address, "port": n.port, "user_agent": n.user_agent,
"version": n.version,
"first_seen": n.first_seen,
"last_seen": n.last_seen,
"last_checked": n.last_checked,
"country": n.country, "city": n.city, "asn": n.asn, "aso": n.aso},
"history": df.to_dict(orient='records')})
return to_json(resp)
def geocode(result):
if result and result['address'].endswith('.onion'):
aso, asn, country, city = "Anonymous", "Anonymous", "Anonymous", "Anonymous"
elif result:
try:
aso = ASN.asn(result['address']).autonomous_system_organization
asn = ASN.asn(result['address']).autonomous_system_number
except AddressNotFoundError:
aso = None
asn = None
try:
country = COUNTRY.country(result['address']).country.name
except AddressNotFoundError:
country = None
try:
city = CITY.city(result['address']).city.name
except AddressNotFoundError:
city = None
else:
return result
result['aso'] = aso
result['asn'] = asn
result['country'] = country
result['city'] = city
return result
def clean_dates(d):
for i in d:
if isinstance(d[i], datetime.datetime):
d[i] = d[i].timestamp()
if isinstance(d[i], dict):
d[i] = clean_dates(d[i])
return d
def to_json(d):
"""
Sanitizes a dictionary - converts datetime.datetime instances to timestamps
:param d: dictionary
:return: json string
"""
d = clean_dates(d)
return json.dumps(d)
def main():
app.run("0.0.0.0", debug=False if "--prod" in sys.argv else True, port=8888 if "--prod" in sys.argv else 5000)
# app.run("0.0.0.0", debug=False if "--prod" in sys.argv else True, port=443 if "--prod" in sys.argv else 5000, ssl_context=('/etc/letsencrypt/live/nodes.okcash.org/fullchain.pem', '/etc/letsencrypt/live/nodes.okcash.org/privkey.pem'))
if __name__ == '__main__':
main()
|
#!/usr/bin/python
import hashlib, re, sys, os, base64, time, random, hmac
# stripped down from https://github.com/vbuterin/pybitcointools/blob/master/bitcoin/main.py
### Elliptic curve parameters
P = 2**256-2**32-2**9-2**8-2**7-2**6-2**4-1
N = 115792089237316195423570985008687907852837564279074904382605163141518161494337
A = 0
B = 7
H = 1
Gx = 55066263022277343669578718895168534326250603453777594175500187360389116729240
Gy = 32670510020758816978083085130507043184471273380659243275938904335757337482424
G = (Gx,Gy)
### Extended Euclidean Algorithm
def inv(a,n):
lm, hm = 1,0
low, high = a%n,n
while low > 1:
r = high/low
nm, new = hm-lm*r, high-low*r
lm, low, hm, high = nm, new, lm, low
return lm % n
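# inv(a, n) returns the modular multiplicative inverse of a mod n via the
# extended Euclidean algorithm, e.g. inv(3, 7) == 5 since 3*5 % 7 == 1.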
### Base switching
def get_code_string(base):
if base == 2: return '01'
elif base == 10: return '0123456789'
elif base == 16: return "0123456789abcdef"
elif base == 58: return "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
elif base == 256: return ''.join([chr(x) for x in range(256)])
else: raise ValueError("Invalid base!")
def encode(val,base,minlen=0):
base, minlen = int(base), int(minlen)
code_string = get_code_string(base)
result = ""
while val > 0:
result = code_string[val % base] + result
val /= base
if len(result) < minlen:
result = code_string[0]*(minlen-len(result))+result
return result
def decode(string,base):
base = int(base)
code_string = get_code_string(base)
result = 0
if base == 16: string = string.lower()
while len(string) > 0:
result *= base
result += code_string.find(string[0])
string = string[1:]
return result
def changebase(string,frm,to,minlen=0):
return encode(decode(string,frm),to,minlen)
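# Examples of the base-switching helpers (Python 2 semantics):
#   encode(255, 16)         -> 'ff'
#   decode('ff', 16)        -> 255
#   changebase('ff', 16, 2) -> '11111111'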
### Elliptic Curve functions
def isinf(p): return p[0] == 0 and p[1] == 0
def base10_add(a,b):
if isinf(a): return b[0],b[1]
if isinf(b): return a[0],a[1]
if a[0] == b[0]:
if a[1] == b[1]: return base10_double((a[0],a[1]))
else: return (0,0)
m = ((b[1]-a[1]) * inv(b[0]-a[0],P)) % P
x = (m*m-a[0]-b[0]) % P
y = (m*(a[0]-x)-a[1]) % P
return (x,y)
def base10_double(a):
if isinf(a): return (0,0)
m = ((3*a[0]*a[0]+A)*inv(2*a[1],P)) % P
x = (m*m-2*a[0]) % P
y = (m*(a[0]-x)-a[1]) % P
return (x,y)
def base10_multiply(a,n):
if isinf(a) or n == 0: return (0,0)
if n == 1: return a
if n < 0 or n >= N: return base10_multiply(a,n%N)
if (n%2) == 0: return base10_double(base10_multiply(a,n/2))
if (n%2) == 1: return base10_add(base10_double(base10_multiply(a,n/2)),a)
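# base10_multiply is recursive double-and-add: n*P is derived from (n/2)*P,
# so scalar multiplication costs O(log n) group operations,
# e.g. 5*G == base10_add(base10_double(base10_double(G)), G).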
# Functions for handling pubkey and privkey formats
def get_pubkey_format(pub):
if isinstance(pub,(tuple,list)): return 'decimal'
elif len(pub) == 65 and pub[0] == '\x04': return 'bin'
elif len(pub) == 130 and pub[0:2] == '04': return 'hex'
elif len(pub) == 33 and pub[0] in ['\x02','\x03']: return 'bin_compressed'
elif len(pub) == 66 and pub[0:2] in ['02','03']: return 'hex_compressed'
elif len(pub) == 64: return 'bin_electrum'
elif len(pub) == 128: return 'hex_electrum'
else: raise Exception("Pubkey not in recognized format")
def encode_pubkey(pub,formt):
if not isinstance(pub,(tuple,list)):
pub = decode_pubkey(pub)
if formt == 'decimal': return pub
elif formt == 'bin': return '\x04' + encode(pub[0],256,32) + encode(pub[1],256,32)
elif formt == 'bin_compressed': return chr(2+(pub[1]%2)) + encode(pub[0],256,32)
elif formt == 'hex': return '04' + encode(pub[0],16,64) + encode(pub[1],16,64)
elif formt == 'hex_compressed': return '0'+str(2+(pub[1]%2)) + encode(pub[0],16,64)
elif formt == 'bin_electrum': return encode(pub[0],256,32) + encode(pub[1],256,32)
elif formt == 'hex_electrum': return encode(pub[0],16,64) + encode(pub[1],16,64)
else: raise Exception("Invalid format!")
def decode_pubkey(pub,formt=None):
if not formt: formt = get_pubkey_format(pub)
if formt == 'decimal': return pub
elif formt == 'bin': return (decode(pub[1:33],256),decode(pub[33:65],256))
elif formt == 'bin_compressed':
x = decode(pub[1:33],256)
beta = pow(x*x*x+7,(P+1)/4,P)
y = (P-beta) if ((beta + ord(pub[0])) % 2) else beta
return (x,y)
elif formt == 'hex': return (decode(pub[2:66],16),decode(pub[66:130],16))
elif formt == 'hex_compressed':
return decode_pubkey(pub.decode('hex'),'bin_compressed')
elif formt == 'bin_electrum':
return (decode(pub[:32],256),decode(pub[32:64],256))
elif formt == 'hex_electrum':
return (decode(pub[:64],16),decode(pub[64:128],16))
else: raise Exception("Invalid format!")
def get_privkey_format(priv):
if isinstance(priv,(int,long)): return 'decimal'
elif len(priv) == 32: return 'bin'
elif len(priv) == 33: return 'bin_compressed'
elif len(priv) == 64: return 'hex'
elif len(priv) == 66: return 'hex_compressed'
else:
bin_p = b58check_to_bin(priv)
if len(bin_p) == 32: return 'wif'
elif len(bin_p) == 33: return 'wif_compressed'
else: raise Exception("WIF does not represent privkey")
def encode_privkey(priv,formt):
if not isinstance(priv,(int,long)):
return encode_privkey(decode_privkey(priv),formt)
if formt == 'decimal': return priv
elif formt == 'bin': return encode(priv,256,32)
elif formt == 'bin_compressed': return encode(priv,256,32)+'\x01'
elif formt == 'hex': return encode(priv,16,64)
elif formt == 'hex_compressed': return encode(priv,16,64)+'01'
else: raise Exception("Invalid format!")
def decode_privkey(priv,formt=None):
if not formt: formt = get_privkey_format(priv)
if formt == 'decimal': return priv
elif formt == 'bin': return decode(priv,256)
elif formt == 'bin_compressed': return decode(priv[:32],256)
elif formt == 'hex': return decode(priv,16)
elif formt == 'hex_compressed': return decode(priv[:64],16)
else: raise Exception("Invalid format!")
def add_pubkeys(p1,p2):
f1,f2 = get_pubkey_format(p1), get_pubkey_format(p2)
return encode_pubkey(base10_add(decode_pubkey(p1,f1),decode_pubkey(p2,f2)),f1)
def add_privkeys(p1,p2):
f1,f2 = get_privkey_format(p1), get_privkey_format(p2)
return encode_privkey((decode_privkey(p1,f1) + decode_privkey(p2,f2)) % N,f1)
def multiply(pubkey,privkey):
f1,f2 = get_pubkey_format(pubkey), get_privkey_format(privkey)
pubkey, privkey = decode_pubkey(pubkey,f1), decode_privkey(privkey,f2)
# http://safecurves.cr.yp.to/twist.html
if not isinf(pubkey) and (pubkey[0]**3+7-pubkey[1]*pubkey[1]) % P != 0:
raise Exception("Point not on curve")
return encode_pubkey(base10_multiply(pubkey,privkey),f1)
def divide(pubkey,privkey):
factor = inv(decode_privkey(privkey),N)
return multiply(pubkey,factor)
def compress(pubkey):
f = get_pubkey_format(pubkey)
if 'compressed' in f: return pubkey
elif f == 'bin': return encode_pubkey(decode_pubkey(pubkey,f),'bin_compressed')
elif f == 'hex' or f == 'decimal':
return encode_pubkey(decode_pubkey(pubkey,f),'hex_compressed')
def decompress(pubkey):
f = get_pubkey_format(pubkey)
if 'compressed' not in f: return pubkey
elif f == 'bin_compressed': return encode_pubkey(decode_pubkey(pubkey,f),'bin')
elif f == 'hex_compressed' or f == 'decimal':
return encode_pubkey(decode_pubkey(pubkey,f),'hex')
def privkey_to_pubkey(privkey):
f = get_privkey_format(privkey)
privkey = decode_privkey(privkey,f)
if privkey == 0 or privkey >= N:
raise Exception("Invalid privkey")
if f in ['bin','bin_compressed','hex','hex_compressed','decimal']:
return encode_pubkey(base10_multiply(G,privkey),f)
else:
return encode_pubkey(base10_multiply(G,privkey),f.replace('wif','hex'))
privtopub = privkey_to_pubkey
def privkey_to_address(priv,magicbyte=0):
return pubkey_to_address(privkey_to_pubkey(priv),magicbyte)
privtoaddr = privkey_to_address
def neg_pubkey(pubkey):
f = get_pubkey_format(pubkey)
pubkey = decode_pubkey(pubkey,f)
return encode_pubkey((pubkey[0],(P-pubkey[1]) % P),f)
def neg_privkey(privkey):
f = get_privkey_format(privkey)
privkey = decode_privkey(privkey,f)
return encode_privkey((N - privkey) % N,f)
def subtract_pubkeys(p1, p2):
f1,f2 = get_pubkey_format(p1), get_pubkey_format(p2)
k2 = decode_pubkey(p2,f2)
return encode_pubkey(base10_add(decode_pubkey(p1,f1),(k2[0],(P - k2[1]) % P)),f1)
def subtract_privkeys(p1, p2):
f1,f2 = get_privkey_format(p1), get_privkey_format(p2)
k2 = decode_privkey(p2,f2)
return encode_privkey((decode_privkey(p1,f1) - k2) % N,f1)
### Hashes
def bin_sha256(string):
return hashlib.sha256(string).digest()
def sha256(string):
return bin_sha256(string).encode('hex')
def hash_to_int(x):
if len(x) in [40,64]: return decode(x,16)
else: return decode(x,256)
def num_to_var_int(x):
x = int(x)
if x < 253: return chr(x)
elif x < 65536: return chr(253) + encode(x,256,2)[::-1]
elif x < 4294967296: return chr(254) + encode(x,256,4)[::-1]
else: return chr(255) + encode(x,256,8)[::-1]
### Encodings
### ECDSA
def encode_sig(v,r,s):
vb, rb, sb = chr(v), encode(r,256), encode(s,256)
return base64.b64encode(vb+'\x00'*(32-len(rb))+rb+'\x00'*(32-len(sb))+sb)
def decode_sig(sig):
bytez = base64.b64decode(sig)
return ord(bytez[0]), decode(bytez[1:33],256), decode(bytez[33:],256)
# https://tools.ietf.org/html/rfc6979#section-3.2
def deterministic_generate_k(msghash,priv):
v = '\x01' * 32
k = '\x00' * 32
priv = encode_privkey(priv,'bin')
msghash = encode(hash_to_int(msghash),256,32)
k = hmac.new(k, v+'\x00'+priv+msghash, hashlib.sha256).digest()
v = hmac.new(k, v, hashlib.sha256).digest()
k = hmac.new(k, v+'\x01'+priv+msghash, hashlib.sha256).digest()
v = hmac.new(k, v, hashlib.sha256).digest()
return decode(hmac.new(k, v, hashlib.sha256).digest(),256)
def ecdsa_raw_sign(msghash,priv):
z = hash_to_int(msghash)
k = deterministic_generate_k(msghash,priv)
r,y = base10_multiply(G,k)
s = inv(k,N) * (z + r*decode_privkey(priv)) % N
return 27+(y%2),r,s
def ecdsa_sign(msg,priv):
return encode_sig(*ecdsa_raw_sign(electrum_sig_hash(msg),priv))
def ecdsa_raw_verify(msghash,vrs,pub):
v,r,s = vrs
w = inv(s,N)
z = hash_to_int(msghash)
u1, u2 = z*w % N, r*w % N
x,y = base10_add(base10_multiply(G,u1), base10_multiply(decode_pubkey(pub),u2))
return r == x
def ecdsa_verify(msg,sig,pub):
return ecdsa_raw_verify(electrum_sig_hash(msg),decode_sig(sig),pub)
def ecdsa_raw_recover(msghash,vrs):
v,r,s = vrs
x = r
beta = pow(x*x*x+7,(P+1)/4,P)
y = beta if v%2 ^ beta%2 else (P - beta)
z = hash_to_int(msghash)
Qr = base10_add(neg_pubkey(base10_multiply(G,z)),base10_multiply((x,y),s))
Q = base10_multiply(Qr,inv(r,N))
if ecdsa_raw_verify(msghash,vrs,Q): return encode_pubkey(Q,'hex')
return False
def ecdsa_recover(msg,sig):
return ecdsa_raw_recover(electrum_sig_hash(msg),decode_sig(sig))
|
#
# ovirt-engine-setup -- ovirt engine setup
#
# Copyright oVirt Authors
# SPDX-License-Identifier: Apache-2.0
#
#
"""ovirt-host-setup vmconsole_proxy plugin."""
from otopi import util
from . import config
from . import pki
from . import system
@util.export
def createPlugins(context):
config.Plugin(context=context)
pki.Plugin(context=context)
system.Plugin(context=context)
# vim: expandtab tabstop=4 shiftwidth=4
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from st2client.models import core
LOG = logging.getLogger(__name__)
class PolicyType(core.Resource):
_alias = 'Policy-Type'
_display_name = 'Policy type'
_plural = 'PolicyTypes'
_plural_display_name = 'Policy types'
_repr_attributes = ['ref', 'enabled', 'description']
class Policy(core.Resource):
_plural = 'Policies'
_repr_attributes = ['name', 'pack', 'enabled', 'policy_type', 'resource_ref']
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###########################################################
# KivyCalendar (X11/MIT License)
# Calendar & Date picker widgets for Kivy (http://kivy.org)
# https://bitbucket.org/xxblx/kivycalendar
#
# Oleg Kozlov (xxblx), 2015
# https://xxblx.bitbucket.org/
###########################################################
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.popup import Popup
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.textinput import TextInput
from kivy.uix.label import Label
from kivy.core.window import Window
from kivy.factory import Factory
from kivy.properties import NumericProperty, ReferenceListProperty
from kivyblocks.i18n import I18n
from . import calendar_data as cal_data
###########################################################
Builder.load_string("""
<ArrowButton>:
background_normal: ""
background_down: ""
background_color: 1, 1, 1, 0
size_hint: .1, .1
<MonthYearLabel>:
pos_hint: {"top": 1, "center_x": .5}
size_hint: None, 0.1
halign: "center"
<MonthsManager>:
pos_hint: {"top": .9}
size_hint: 1, .9
<ButtonsGrid>:
cols: 7
rows: 7
size_hint: 1, 1
pos_hint: {"top": 1}
<DayAbbrLabel>:
text_size: self.size[0], None
halign: "center"
<DayAbbrWeekendLabel>:
color: 1, 0, 0, 1
<DayButton>:
group: "day_num"
<DayNumWeekendButton>:
background_color: 1, 0, 0, 1
""")
###########################################################
class DatePicker(TextInput):
"""
    Date picker is a textinput; when it is focused it shows a popup with a
    calendar, and the popup dimensions can be defined using pHint_x, pHint_y
    and the pHint lists, for example in kv:
DatePicker:
pHint: 0.7,0.4
would result in a size_hint of 0.7,0.4 being used to create the popup
"""
def __init__(self, touch_switch=False, value=None, *args, **kwargs):
super(DatePicker, self).__init__(*args, **kwargs)
self.touch_switch = touch_switch
self.init_ui(value)
def getValue(self):
return self.text
def setValue(self, sdate):
self.text = sdate
d = [int(i) for i in sdate.split('-')]
d.reverse()
self.cal.active_date = d
def init_ui(self, value):
if not value:
value = cal_data.today_date()
d = [int(i) for i in value.split('.')]
value = '%04d-%02d-%02d' % (d[2],d[1],d[0])
# Calendar
self.cal = CalendarWidget(as_popup=True,
touch_switch=self.touch_switch)
self.setValue(value)
# Popup
self.popup = Popup(content=self.cal, on_dismiss=self.update_value,
size_hint=(0.7,0.7),
title="")
self.cal.parent_popup = self.popup
self.bind(focus=self.show_popup)
    def show_popup(self, inst, val):
        """
        Open the popup when the textinput gains focus,
        first dismissing the keyboard that the focus raised
        """
if val:
# Automatically dismiss the keyboard
# that results from the textInput
Window.release_all_keyboards()
self.popup.open()
def update_value(self, inst):
""" Update textinput value on popup close """
d = self.cal.active_date
self.text = "%04d-%02d-%02d" % (d[2],d[1],d[0])
self.focus = False
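# Illustrative DatePicker usage inside a running Kivy app (the popup needs an
# application window to attach to):
#   picker = DatePicker(touch_switch=True)
#   picker.setValue("2015-03-27")  # ISO date string; parsed as year-month-day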
class CalendarWidget(RelativeLayout):
""" Basic calendar widget """
def __init__(self, as_popup=False, touch_switch=False, *args, **kwargs):
super(CalendarWidget, self).__init__(*args, **kwargs)
self.i18n = I18n()
self.as_popup = as_popup
self.touch_switch = touch_switch
self.prepare_data()
self.init_ui()
def init_ui(self):
self.left_arrow = ArrowButton(text="<", on_press=self.go_prev,
pos_hint={"top": 1, "left": 0})
self.right_arrow = ArrowButton(text=">", on_press=self.go_next,
pos_hint={"top": 1, "right": 1})
self.add_widget(self.left_arrow)
self.add_widget(self.right_arrow)
# Title
self.title_label = MonthYearLabel(text=self.title)
self.add_widget(self.title_label)
# ScreenManager
self.sm = MonthsManager()
self.add_widget(self.sm)
        self.create_month_scr(self.quarter[1], toggle_today=True)
    def create_month_scr(self, month, toggle_today=False):
""" Screen with calendar for one month """
scr = Screen()
m = self.month_names_eng[self.active_date[1] - 1]
scr.name = "%s-%s" % (m, self.active_date[2]) # like march-2015
# Grid for days
grid_layout = ButtonsGrid()
scr.add_widget(grid_layout)
# Days abbrs
for i in range(7):
if i >= 5: # weekends
#l = DayAbbrWeekendLabel(text=self.days_abrs[i])
l = Factory.Text(text=self.days_abrs[i], i18n=True)
else: # work days
#l = DayAbbrLabel(text=self.days_abrs[i])
l = Factory.Text(text=self.days_abrs[i], i18n=True)
grid_layout.add_widget(l)
# Buttons with days numbers
for week in month:
for day in week:
if day[1] >= 5: # weekends
tbtn = DayNumWeekendButton(text=str(day[0]))
else: # work days
tbtn = DayNumButton(text=str(day[0]))
tbtn.bind(on_press=self.get_btn_value)
                if toggle_today:
# Down today button
if day[0] == self.active_date[0] and day[2] == 1:
tbtn.state = "down"
# Disable buttons with days from other months
if day[2] == 0:
tbtn.disabled = True
grid_layout.add_widget(tbtn)
self.sm.add_widget(scr)
def prepare_data(self):
""" Prepare data for showing on widget loading """
# Get days abbrs and month names lists
self.month_names = cal_data.get_month_names()
self.month_names_eng = cal_data.get_month_names_eng()
self.days_abrs = cal_data.get_days_abbrs()
# Today date
self.active_date = cal_data.today_date_list()
# Set title
self.title = "%s - %s" % (self.i18n(self.month_names[self.active_date[1] - 1]),
self.active_date[2])
        # Quarter containing the current month is self.quarter[1]
self.get_quarter()
def get_quarter(self):
""" Get caledar and months/years nums for quarter """
self.quarter_nums = cal_data.calc_quarter(self.active_date[2],
self.active_date[1])
self.quarter = cal_data.get_quarter(self.active_date[2],
self.active_date[1])
def get_btn_value(self, inst):
""" Get day value from pressed button """
self.active_date[0] = int(inst.text)
if self.as_popup:
self.parent_popup.dismiss()
def go_prev(self, inst):
""" Go to screen with previous month """
# Change active date
self.active_date = [self.active_date[0], self.quarter_nums[0][1],
self.quarter_nums[0][0]]
# Name of prev screen
n = self.quarter_nums[0][1] - 1
prev_scr_name = "%s-%s" % (self.month_names_eng[n],
self.quarter_nums[0][0])
        # If it doesn't exist, create it
if not self.sm.has_screen(prev_scr_name):
self.create_month_scr(self.quarter[0])
self.sm.current = prev_scr_name
self.sm.transition.direction = "right"
self.get_quarter()
self.title = "%s - %s" % (self.i18n(self.month_names[self.active_date[1] - 1]),
self.active_date[2])
self.title_label.text = self.title
def go_next(self, inst):
""" Go to screen with next month """
# Change active date
self.active_date = [self.active_date[0], self.quarter_nums[2][1],
self.quarter_nums[2][0]]
        # Name of next screen
n = self.quarter_nums[2][1] - 1
next_scr_name = "%s-%s" % (self.month_names_eng[n],
self.quarter_nums[2][0])
        # If it doesn't exist, create it
if not self.sm.has_screen(next_scr_name):
self.create_month_scr(self.quarter[2])
self.sm.current = next_scr_name
self.sm.transition.direction = "left"
self.get_quarter()
self.title = "%s - %s" % (self.i18n(self.month_names[self.active_date[1] - 1]),
self.active_date[2])
self.title_label.text = self.title
def on_touch_move(self, touch):
""" Switch months pages by touch move """
if self.touch_switch:
# Left - prev
if touch.dpos[0] < -30:
self.go_prev(None)
# Right - next
elif touch.dpos[0] > 30:
self.go_next(None)
class ArrowButton(Button):
pass
class MonthYearLabel(Label):
pass
class MonthsManager(ScreenManager):
pass
class ButtonsGrid(GridLayout):
pass
class DayAbbrLabel(Label):
pass
class DayAbbrWeekendLabel(DayAbbrLabel):
pass
class DayButton(ToggleButton):
pass
class DayNumButton(DayButton):
pass
class DayNumWeekendButton(DayButton):
pass
|
"""
Utilities for API Gateway response formatting
"""
import json
def format_response(data, status=200):
return {
'body': json.dumps(data),
'headers': {
'Content-Type': 'application/json'
},
'statusCode': int(status)
}
def format_error(msg, code='BadRequest', status=400):
data = {
'success': False,
'error': {
'code': code,
'message': msg
}
}
return format_response(data, status)
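# Minimal self-check sketch for the helpers above:
if __name__ == '__main__':
    ok = format_response({'success': True})
    err = format_error('missing field "id"')
    assert ok['statusCode'] == 200 and err['statusCode'] == 400
    print(ok['body'])
    print(err['body'])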
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Class for VM tasks like spawn, snapshot, suspend, resume etc.
"""
import collections
import copy
import os
import time
import decorator
from oslo.config import cfg
from oslo.vmware import exceptions as vexc
from nova.api.metadata import base as instance_metadata
from nova import compute
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
from nova.console import type as ctype
from nova import context as nova_context
from nova import exception
from nova.i18n import _, _LE, _LW
from nova import objects
from nova.openstack.common import excutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova.openstack.common import uuidutils
from nova import utils
from nova.virt import configdrive
from nova.virt import diagnostics
from nova.virt import driver
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import imagecache
from nova.virt.vmwareapi import vif as vmwarevif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmware_images
CONF = cfg.CONF
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
CONF.import_opt('remove_unused_base_images', 'nova.virt.imagecache')
CONF.import_opt('vnc_enabled', 'nova.vnc')
CONF.import_opt('my_ip', 'nova.netconf')
LOG = logging.getLogger(__name__)
VMWARE_POWER_STATES = {
'poweredOff': power_state.SHUTDOWN,
'poweredOn': power_state.RUNNING,
'suspended': power_state.SUSPENDED}
RESIZE_TOTAL_STEPS = 4
DcInfo = collections.namedtuple('DcInfo',
['ref', 'name', 'vmFolder'])
class VirtualMachineInstanceConfigInfo(object):
"""Parameters needed to create and configure a new instance."""
def __init__(self, instance, instance_name, image_info,
datastore, dc_info, image_cache):
# Some methods called during spawn take the instance parameter purely
# for logging purposes.
# TODO(vui) Clean them up, so we no longer need to keep this variable
self.instance = instance
# Get the instance name. In some cases this may differ from the 'uuid',
# for example when the spawn of a rescue instance takes place.
self.instance_name = instance_name or instance.uuid
self.ii = image_info
self.root_gb = instance.root_gb
self.datastore = datastore
self.dc_info = dc_info
self._image_cache = image_cache
@property
def cache_image_folder(self):
if self.ii.image_id is None:
return
return self._image_cache.get_image_cache_folder(
self.datastore, self.ii.image_id)
@property
def cache_image_path(self):
if self.ii.image_id is None:
return
cached_image_file_name = "%s.%s" % (self.ii.image_id,
self.ii.file_type)
return self.cache_image_folder.join(cached_image_file_name)
# Note(vui): See https://bugs.launchpad.net/nova/+bug/1363349
# for cases where mocking time.sleep() can have unintended effects on code
# not under test. For now, unblock the affected test cases by providing
# a wrapper function to work around needing to mock time.sleep()
def _time_sleep_wrapper(delay):
time.sleep(delay)
@decorator.decorator
def retry_if_task_in_progress(f, *args, **kwargs):
retries = max(CONF.vmware.api_retry_count, 1)
delay = 1
for attempt in range(1, retries + 1):
if attempt != 1:
_time_sleep_wrapper(delay)
delay = min(2 * delay, 60)
try:
f(*args, **kwargs)
return
except error_util.TaskInProgress:
pass
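# retry_if_task_in_progress retries the wrapped call with exponential backoff
# (1s, 2s, 4s, ... capped at 60s) for up to max(CONF.vmware.api_retry_count, 1)
# attempts; see _delete_vm_snapshot below for a usage site.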
class VMwareVMOps(object):
"""Management class for VM-related tasks."""
def __init__(self, session, virtapi, volumeops, cluster=None,
datastore_regex=None):
"""Initializer."""
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
self._volumeops = volumeops
self._cluster = cluster
self._datastore_regex = datastore_regex
# Ensure that the base folder is unique per compute node
if CONF.remove_unused_base_images:
self._base_folder = '%s%s' % (CONF.my_ip,
CONF.image_cache_subdirectory_name)
else:
            # Disabling aging ensures backward compatibility
self._base_folder = CONF.image_cache_subdirectory_name
self._tmp_folder = 'vmware_temp'
self._default_root_device = 'vda'
self._rescue_suffix = '-rescue'
self._migrate_suffix = '-orig'
self._datastore_dc_mapping = {}
self._datastore_browser_mapping = {}
self._imagecache = imagecache.ImageCacheManager(self._session,
self._base_folder)
def _extend_virtual_disk(self, instance, requested_size, name, dc_ref):
service_content = self._session._get_vim().service_content
LOG.debug("Extending root virtual disk to %s", requested_size)
vmdk_extend_task = self._session._call_method(
self._session._get_vim(),
"ExtendVirtualDisk_Task",
service_content.virtualDiskManager,
name=name,
datacenter=dc_ref,
newCapacityKb=requested_size,
eagerZero=False)
try:
self._session._wait_for_task(vmdk_extend_task)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Extending virtual disk failed with error: %s'),
e, instance=instance)
# Clean up files created during the extend operation
files = [name.replace(".vmdk", "-flat.vmdk"), name]
for file in files:
ds_path = ds_util.DatastorePath.parse(file)
self._delete_datastore_file(instance, ds_path, dc_ref)
LOG.debug("Extended root virtual disk")
def _delete_datastore_file(self, instance, datastore_path, dc_ref):
try:
ds_util.file_delete(self._session, datastore_path, dc_ref)
except (vexc.CannotDeleteFileException,
vexc.FileFaultException,
vexc.FileLockedException,
vexc.FileNotFoundException):
LOG.debug("Unable to delete %(ds)s. There may be more than "
"one process or thread trying to delete the file",
{'ds': datastore_path},
exc_info=True)
def _extend_if_required(self, dc_info, image_info, instance,
root_vmdk_path):
"""Increase the size of the root vmdk if necessary."""
if instance.root_gb > image_info.file_size_in_gb:
size_in_kb = instance.root_gb * units.Mi
self._extend_virtual_disk(instance, size_in_kb,
root_vmdk_path, dc_info.ref)
def _configure_config_drive(self, instance, vm_ref, dc_info, datastore,
injected_files, admin_password):
session_vim = self._session._get_vim()
cookies = session_vim.client.options.transport.cookiejar
uploaded_iso_path = self._create_config_drive(instance,
injected_files,
admin_password,
datastore.name,
dc_info.name,
instance['uuid'],
cookies)
uploaded_iso_path = datastore.build_path(uploaded_iso_path)
self._attach_cdrom_to_vm(
vm_ref, instance,
datastore.ref,
str(uploaded_iso_path))
def build_virtual_machine(self, instance, instance_name, image_info,
dc_info, datastore, network_info):
node_mo_id = vm_util.get_mo_id_from_instance(instance)
res_pool_ref = vm_util.get_res_pool_ref(self._session,
self._cluster, node_mo_id)
vif_infos = vmwarevif.get_vif_info(self._session,
self._cluster,
utils.is_neutron(),
image_info.vif_model,
network_info)
allocations = self._get_cpu_allocations(instance.instance_type_id)
# Get the create vm config spec
client_factory = self._session._get_vim().client.factory
config_spec = vm_util.get_vm_create_spec(client_factory,
instance,
instance_name,
datastore.name,
vif_infos,
image_info.os_type,
allocations=allocations)
# Create the VM
vm_ref = vm_util.create_vm(self._session, instance, dc_info.vmFolder,
config_spec, res_pool_ref)
return vm_ref
def _get_cpu_allocations(self, instance_type_id):
# Read flavors for allocations
flavor = objects.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance_type_id)
allocations = {}
for (key, type) in (('cpu_limit', int),
('cpu_reservation', int),
('cpu_shares_level', str),
('cpu_shares_share', int)):
value = flavor.extra_specs.get('quota:' + key)
if value:
allocations[key] = type(value)
return allocations
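    # _get_cpu_allocations example: a flavor with extra_specs
    # {'quota:cpu_limit': '2000', 'quota:cpu_shares_level': 'custom'}
    # yields {'cpu_limit': 2000, 'cpu_shares_level': 'custom'}.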
def _fetch_image_as_file(self, context, vi, image_ds_loc):
"""Download image as an individual file to host via HTTP PUT."""
session = self._session
session_vim = session._get_vim()
cookies = session_vim.client.options.transport.cookiejar
LOG.debug("Downloading image file data %(image_id)s to "
"%(file_path)s on the data store "
"%(datastore_name)s",
{'image_id': vi.ii.image_id,
'file_path': image_ds_loc,
'datastore_name': vi.datastore.name},
instance=vi.instance)
vmware_images.fetch_image(
context,
vi.instance,
session._host,
vi.dc_info.name,
vi.datastore.name,
image_ds_loc.rel_path,
cookies=cookies)
def _prepare_sparse_image(self, vi):
tmp_dir_loc = vi.datastore.build_path(
self._tmp_folder, uuidutils.generate_uuid())
tmp_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, "tmp-sparse.vmdk")
return tmp_dir_loc, tmp_image_ds_loc
def _prepare_flat_image(self, vi):
tmp_dir_loc = vi.datastore.build_path(
self._tmp_folder, uuidutils.generate_uuid())
tmp_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, vi.cache_image_path.basename)
ds_util.mkdir(self._session, tmp_image_ds_loc.parent, vi.dc_info.ref)
vm_util.create_virtual_disk(
self._session, vi.dc_info.ref,
vi.ii.adapter_type,
vi.ii.disk_type,
str(tmp_image_ds_loc),
vi.ii.file_size_in_kb)
flat_vmdk_name = vi.cache_image_path.basename.replace('.vmdk',
'-flat.vmdk')
flat_vmdk_ds_loc = tmp_dir_loc.join(vi.ii.image_id, flat_vmdk_name)
self._delete_datastore_file(vi.instance, str(flat_vmdk_ds_loc),
vi.dc_info.ref)
return tmp_dir_loc, flat_vmdk_ds_loc
def _prepare_iso_image(self, vi):
tmp_dir_loc = vi.datastore.build_path(
self._tmp_folder, uuidutils.generate_uuid())
tmp_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, vi.cache_image_path.basename)
return tmp_dir_loc, tmp_image_ds_loc
def _move_to_cache(self, dc_ref, src_folder_ds_path, dst_folder_ds_path):
try:
ds_util.file_move(self._session, dc_ref,
src_folder_ds_path, dst_folder_ds_path)
except vexc.FileAlreadyExistsException:
# Folder move has failed. This may be due to the fact that a
# process or thread has already completed the operation.
# Since image caching is synchronized, this can only happen
# due to action external to the process.
# In the event of a FileAlreadyExists we continue,
# all other exceptions will be raised.
LOG.warning(_LW("Destination %s already exists! Concurrent moves "
"can lead to unexpected results."),
dst_folder_ds_path)
def _cache_sparse_image(self, vi, tmp_image_ds_loc):
tmp_dir_loc = tmp_image_ds_loc.parent.parent
converted_image_ds_loc = tmp_dir_loc.join(
vi.ii.image_id, vi.cache_image_path.basename)
# converts fetched image to preallocated disk
vm_util.copy_virtual_disk(
self._session,
vi.dc_info.ref,
str(tmp_image_ds_loc),
str(converted_image_ds_loc))
self._delete_datastore_file(vi.instance, str(tmp_image_ds_loc),
vi.dc_info.ref)
self._move_to_cache(vi.dc_info.ref,
tmp_image_ds_loc.parent,
vi.cache_image_folder)
def _cache_flat_image(self, vi, tmp_image_ds_loc):
self._move_to_cache(vi.dc_info.ref,
tmp_image_ds_loc.parent,
vi.cache_image_folder)
def _cache_iso_image(self, vi, tmp_image_ds_loc):
self._move_to_cache(vi.dc_info.ref,
tmp_image_ds_loc.parent,
vi.cache_image_folder)
def _get_vm_config_info(self, instance, image_info, instance_name=None):
"""Captures all relevant information from the spawn parameters."""
if (instance.root_gb != 0 and
image_info.file_size_in_gb > instance.root_gb):
reason = _("Image disk size greater than requested disk size")
raise exception.InstanceUnacceptable(instance_id=instance.uuid,
reason=reason)
datastore = ds_util.get_datastore(
self._session, self._cluster, self._datastore_regex)
dc_info = self.get_datacenter_ref_and_name(datastore.ref)
return VirtualMachineInstanceConfigInfo(instance,
instance_name,
image_info,
datastore,
dc_info,
self._imagecache)
def _get_image_callbacks(self, vi):
disk_type = vi.ii.disk_type
image_fetch = self._fetch_image_as_file
if vi.ii.is_iso:
image_prepare = self._prepare_iso_image
image_cache = self._cache_iso_image
elif disk_type == constants.DISK_TYPE_SPARSE:
image_prepare = self._prepare_sparse_image
image_cache = self._cache_sparse_image
elif disk_type in constants.SUPPORTED_FLAT_VARIANTS:
image_prepare = self._prepare_flat_image
image_cache = self._cache_flat_image
else:
reason = _("disk type '%s' not supported") % disk_type
raise exception.InvalidDiskInfo(reason=reason)
return image_prepare, image_fetch, image_cache
def _fetch_image_if_missing(self, context, vi):
image_prepare, image_fetch, image_cache = self._get_image_callbacks(vi)
LOG.debug("Processing image %s", vi.ii.image_id)
with lockutils.lock(str(vi.cache_image_path),
lock_file_prefix='nova-vmware-fetch_image'):
self.check_cache_folder(vi.datastore.name, vi.datastore.ref)
ds_browser = self._get_ds_browser(vi.datastore.ref)
if not ds_util.file_exists(self._session, ds_browser,
vi.cache_image_folder,
vi.cache_image_path.basename):
LOG.debug("Preparing fetch location")
tmp_dir_loc, tmp_image_ds_loc = image_prepare(vi)
LOG.debug("Fetch image to %s", tmp_image_ds_loc)
image_fetch(context, vi, tmp_image_ds_loc)
LOG.debug("Caching image")
image_cache(vi, tmp_image_ds_loc)
LOG.debug("Cleaning up location %s", str(tmp_dir_loc))
self._delete_datastore_file(vi.instance, str(tmp_dir_loc),
vi.dc_info.ref)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None,
instance_name=None, power_on=True):
client_factory = self._session._get_vim().client.factory
image_info = vmware_images.VMwareImage.from_image(instance.image_ref,
image_meta)
vi = self._get_vm_config_info(instance, image_info, instance_name)
# Creates the virtual machine. The virtual machine reference returned
# is unique within Virtual Center.
vm_ref = self.build_virtual_machine(instance,
vi.instance_name,
image_info,
vi.dc_info,
vi.datastore,
network_info)
# Cache the vm_ref. This saves a remote call to the VC. This uses the
# instance_name. This covers all use cases including rescue and resize.
vm_util.vm_ref_cache_update(vi.instance_name, vm_ref)
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
if CONF.flat_injected:
self._set_machine_id(client_factory, instance, network_info)
# Set the vnc configuration of the instance, vnc port starts from 5900
if CONF.vnc_enabled:
self._get_and_set_vnc_config(client_factory, instance)
block_device_mapping = []
if block_device_info is not None:
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
# NOTE(mdbooth): the logic here is that we ignore the image if there
# are block device mappings. This behaviour is incorrect, and a bug in
# the driver. We should be able to accept an image and block device
# mappings.
if len(block_device_mapping) > 0:
msg = "Block device information present: %s" % block_device_info
# NOTE(mriedem): block_device_info can contain an auth_password
# so we have to scrub the message before logging it.
LOG.debug(logging.mask_password(msg), instance=instance)
for root_disk in block_device_mapping:
connection_info = root_disk['connection_info']
# TODO(hartsocks): instance is unnecessary, remove it
# we still use instance in many locations for no other purpose
# than logging, can we simplify this?
self._volumeops.attach_root_volume(connection_info, instance,
self._default_root_device,
vi.datastore.ref)
else:
self._imagecache.enlist_image(
image_info.image_id, vi.datastore, vi.dc_info.ref)
self._fetch_image_if_missing(context, vi)
if image_info.is_iso:
self._use_iso_image(vm_ref, vi)
elif image_info.linked_clone:
self._use_disk_image_as_linked_clone(vm_ref, vi)
else:
self._use_disk_image_as_full_clone(vm_ref, vi)
if configdrive.required_by(instance):
self._configure_config_drive(
instance, vm_ref, vi.dc_info, vi.datastore,
injected_files, admin_password)
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
def _create_config_drive(self, instance, injected_files, admin_password,
data_store_name, dc_name, upload_folder, cookies):
if CONF.config_drive_format != 'iso9660':
reason = (_('Invalid config_drive_format "%s"') %
CONF.config_drive_format)
raise exception.InstancePowerOnFailure(reason=reason)
LOG.info(_('Using config drive for instance'), instance=instance)
extra_md = {}
if admin_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
content=injected_files,
extra_md=extra_md)
try:
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
with utils.tempdir() as tmp_path:
tmp_file = os.path.join(tmp_path, 'configdrive.iso')
cdb.make_drive(tmp_file)
upload_iso_path = "%s/configdrive.iso" % (
upload_folder)
vmware_images.upload_iso_to_datastore(
tmp_file, instance,
host=self._session._host,
data_center_name=dc_name,
datastore_name=data_store_name,
cookies=cookies,
file_path=upload_iso_path)
return upload_iso_path
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Creating config drive failed with error: %s'),
e, instance=instance)
def _attach_cdrom_to_vm(self, vm_ref, instance,
datastore, file_path):
"""Attach cdrom to VM by reconfiguration."""
client_factory = self._session._get_vim().client.factory
devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
(controller_key, unit_number,
controller_spec) = vm_util.allocate_controller_key_and_unit_number(
client_factory,
devices,
'ide')
cdrom_attach_config_spec = vm_util.get_cdrom_attach_config_spec(
client_factory, datastore, file_path,
controller_key, unit_number)
if controller_spec:
cdrom_attach_config_spec.deviceChange.append(controller_spec)
LOG.debug("Reconfiguring VM instance to attach cdrom %s",
file_path, instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, cdrom_attach_config_spec)
LOG.debug("Reconfigured VM instance to attach cdrom %s",
file_path, instance=instance)
def _create_vm_snapshot(self, instance, vm_ref):
LOG.debug("Creating Snapshot of the VM instance", instance=instance)
snapshot_task = self._session._call_method(
self._session._get_vim(),
"CreateSnapshot_Task", vm_ref,
name="%s-snapshot" % instance.uuid,
description="Taking Snapshot of the VM",
memory=False,
quiesce=True)
self._session._wait_for_task(snapshot_task)
LOG.debug("Created Snapshot of the VM instance", instance=instance)
task_info = self._session._call_method(vim_util,
"get_dynamic_property",
snapshot_task, "Task", "info")
snapshot = task_info.result
return snapshot
@retry_if_task_in_progress
def _delete_vm_snapshot(self, instance, vm_ref, snapshot):
LOG.debug("Deleting Snapshot of the VM instance", instance=instance)
delete_snapshot_task = self._session._call_method(
self._session._get_vim(),
"RemoveSnapshot_Task", snapshot,
removeChildren=False, consolidate=True)
self._session._wait_for_task(delete_snapshot_task)
LOG.debug("Deleted Snapshot of the VM instance", instance=instance)
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
Steps followed are:
1. Get the name of the vmdk file which the VM points to right now.
Can be a chain of snapshots, so we need to know the last in the
chain.
2. Create the snapshot. A new vmdk is created which the VM points to
now. The earlier vmdk becomes read-only.
3. Call CopyVirtualDisk which coalesces the disk chain to form a single
vmdk, rather a .vmdk metadata file and a -flat.vmdk disk data file.
4. Now upload the -flat.vmdk file to the image store.
5. Delete the coalesced .vmdk and -flat.vmdk created.
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
service_content = self._session._get_vim().service_content
def _get_vm_and_vmdk_attribs():
# Get the vmdk file name that the VM is pointing to
hw_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
(vmdk_file_path_before_snapshot, adapter_type,
disk_type) = vm_util.get_vmdk_path_and_adapter_type(
hw_devices, uuid=instance.uuid)
if not vmdk_file_path_before_snapshot:
LOG.debug("No root disk defined. Unable to snapshot.")
raise error_util.NoRootDiskDefined()
datastore_name = ds_util.DatastorePath.parse(
vmdk_file_path_before_snapshot).datastore
os_type = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "summary.config.guestId")
return (vmdk_file_path_before_snapshot, adapter_type, disk_type,
datastore_name, os_type)
(vmdk_file_path_before_snapshot, adapter_type, disk_type,
datastore_name, os_type) = _get_vm_and_vmdk_attribs()
snapshot = self._create_vm_snapshot(instance, vm_ref)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
def _check_if_tmp_folder_exists():
# Copy the contents of the VM that were there just before the
# snapshot was taken
ds_ref_ret = self._session._call_method(
vim_util, "get_dynamic_property", vm_ref, "VirtualMachine",
"datastore")
if ds_ref_ret is None:
raise exception.DatastoreNotFound()
ds_ref = ds_ref_ret.ManagedObjectReference[0]
self.check_temp_folder(datastore_name, ds_ref)
return ds_ref
ds_ref = _check_if_tmp_folder_exists()
# Generate a random vmdk file name to which the coalesced vmdk content
# will be copied to. A random name is chosen so that we don't have
# name clashes.
random_name = uuidutils.generate_uuid()
dest_vmdk_file_path = ds_util.DatastorePath(
datastore_name, self._tmp_folder, "%s.vmdk" % random_name)
dest_vmdk_data_file_path = ds_util.DatastorePath(
datastore_name, self._tmp_folder, "%s-flat.vmdk" % random_name)
dc_info = self.get_datacenter_ref_and_name(ds_ref)
def _copy_vmdk_content():
# Consolidate the snapshotted disk to a temporary vmdk.
LOG.debug('Copying snapshotted disk %s.',
vmdk_file_path_before_snapshot,
instance=instance)
copy_disk_task = self._session._call_method(
self._session._get_vim(),
"CopyVirtualDisk_Task",
service_content.virtualDiskManager,
sourceName=vmdk_file_path_before_snapshot,
sourceDatacenter=dc_info.ref,
destName=str(dest_vmdk_file_path),
destDatacenter=dc_info.ref,
force=False)
self._session._wait_for_task(copy_disk_task)
LOG.debug('Copied snapshotted disk %s.',
vmdk_file_path_before_snapshot,
instance=instance)
_copy_vmdk_content()
self._delete_vm_snapshot(instance, vm_ref, snapshot)
cookies = self._session._get_vim().client.options.transport.cookiejar
def _upload_vmdk_to_image_repository():
# Upload the contents of -flat.vmdk file which has the disk data.
LOG.debug("Uploading image %s", image_id,
instance=instance)
vmware_images.upload_image(
context,
image_id,
instance,
os_type=os_type,
disk_type=constants.DEFAULT_DISK_TYPE,
adapter_type=adapter_type,
image_version=1,
host=self._session._host,
data_center_name=dc_info.name,
datastore_name=datastore_name,
cookies=cookies,
file_path="%s/%s-flat.vmdk" % (self._tmp_folder, random_name))
LOG.debug("Uploaded image %s", image_id,
instance=instance)
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
_upload_vmdk_to_image_repository()
def _clean_temp_data():
"""Delete temporary vmdk files generated in image handling
operations.
"""
# The data file is the one occupying space, and likelier to see
# deletion problems, so prioritize its deletion first. In the
# unlikely event that its deletion fails, the small descriptor file
# is retained too by design since it makes little sense to remove
# it when the data disk it refers to still lingers.
for f in dest_vmdk_data_file_path, dest_vmdk_file_path:
self._delete_datastore_file(instance, f, dc_info.ref)
_clean_temp_data()
def reboot(self, instance, network_info):
"""Reboot a VM instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
"summary.guest.toolsRunningStatus"]
props = self._session._call_method(vim_util, "get_object_properties",
None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(self._session, props)
pwr_state = query['runtime.powerState']
tools_status = query['summary.guest.toolsStatus']
tools_running_status = query['summary.guest.toolsRunningStatus']
# Raise an exception if the VM is not powered On.
if pwr_state not in ["poweredOn"]:
reason = _("instance is not powered on")
raise exception.InstanceRebootFailure(reason=reason)
# If the latest VMware Tools are installed in the VM and are running,
# do a guest (soft) reboot only. Otherwise do a hard reset.
if (tools_status == "toolsOk" and
tools_running_status == "guestToolsRunning"):
LOG.debug("Rebooting guest OS of VM", instance=instance)
self._session._call_method(self._session._get_vim(), "RebootGuest",
vm_ref)
LOG.debug("Rebooted guest OS of VM", instance=instance)
else:
LOG.debug("Doing hard reboot of VM", instance=instance)
reset_task = self._session._call_method(self._session._get_vim(),
"ResetVM_Task", vm_ref)
self._session._wait_for_task(reset_task)
LOG.debug("Did hard reboot of VM", instance=instance)
def _destroy_instance(self, instance, destroy_disks=True,
instance_name=None):
# Destroy a VM instance
# Get the instance name. In some cases this may differ from the 'uuid',
# for example when a rescue instance is spawned.
if instance_name is None:
instance_name = instance['uuid']
try:
vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
if vm_ref is None:
LOG.warning(_('Instance does not exist on backend'),
instance=instance)
return
lst_properties = ["config.files.vmPathName", "runtime.powerState",
"datastore"]
props = self._session._call_method(vim_util,
"get_object_properties",
None, vm_ref, "VirtualMachine", lst_properties)
query = vm_util.get_values_from_object_properties(
self._session, props)
pwr_state = query['runtime.powerState']
vm_config_pathname = query['config.files.vmPathName']
vm_ds_path = None
if vm_config_pathname:
vm_ds_path = ds_util.DatastorePath.parse(vm_config_pathname)
# Power off the VM if it is in PoweredOn state.
if pwr_state == "poweredOn":
vm_util.power_off_instance(self._session, instance, vm_ref)
# Un-register the VM
try:
LOG.debug("Unregistering the VM", instance=instance)
self._session._call_method(self._session._get_vim(),
"UnregisterVM", vm_ref)
LOG.debug("Unregistered the VM", instance=instance)
except Exception as excep:
LOG.warn(_("In vmwareapi:vmops:_destroy_instance, got this "
"exception while un-registering the VM: %s"),
excep)
# Delete the folder holding the VM related content on
# the datastore.
if destroy_disks and vm_ds_path:
try:
dir_ds_compliant_path = vm_ds_path.parent
LOG.debug("Deleting contents of the VM from "
"datastore %(datastore_name)s",
{'datastore_name': vm_ds_path.datastore},
instance=instance)
ds_ref_ret = query['datastore']
ds_ref = ds_ref_ret.ManagedObjectReference[0]
dc_info = self.get_datacenter_ref_and_name(ds_ref)
ds_util.file_delete(self._session,
dir_ds_compliant_path,
dc_info.ref)
LOG.debug("Deleted contents of the VM from "
"datastore %(datastore_name)s",
{'datastore_name': vm_ds_path.datastore},
instance=instance)
except Exception:
LOG.warn(_("In vmwareapi:vmops:_destroy_instance, "
"exception while deleting the VM contents from "
"the disk"), exc_info=True)
except Exception as exc:
LOG.exception(exc, instance=instance)
finally:
vm_util.vm_ref_cache_delete(instance_name)
def destroy(self, instance, destroy_disks=True):
"""Destroy a VM instance.
Steps followed for each VM are:
1. Power off, if it is in poweredOn state.
2. Un-register.
3. Delete the contents of the folder holding the VM related data.
"""
# If there is a rescue VM then we need to destroy that one too.
LOG.debug("Destroying instance", instance=instance)
if instance['vm_state'] == vm_states.RESCUED:
LOG.debug("Rescue VM configured", instance=instance)
try:
self.unrescue(instance, power_on=False)
LOG.debug("Rescue VM destroyed", instance=instance)
except Exception:
rescue_name = instance['uuid'] + self._rescue_suffix
self._destroy_instance(instance,
destroy_disks=destroy_disks,
instance_name=rescue_name)
# NOTE(arnaud): Destroy the uuid-orig and uuid VMs iff the destroy is
# not triggered by the revert resize api call. This prevents the
# uuid-orig VM from being deleted, so that it can be associated later.
if instance.task_state != task_states.RESIZE_REVERTING:
# When VM deletion is triggered in the middle of a VM resize, before
# the VM reaches the RESIZED state, the uuid-orig VM needs to be
# deleted to avoid a VM leak. The method _destroy_instance checks
# whether the vm_ref exists before attempting the deletion.
resize_orig_vmname = instance['uuid'] + self._migrate_suffix
vm_orig_ref = vm_util.get_vm_ref_from_name(self._session,
resize_orig_vmname)
if vm_orig_ref:
self._destroy_instance(instance,
destroy_disks=destroy_disks,
instance_name=resize_orig_vmname)
self._destroy_instance(instance, destroy_disks=destroy_disks)
LOG.debug("Instance destroyed", instance=instance)
def pause(self, instance):
msg = _("pause not supported for vmwareapi")
raise NotImplementedError(msg)
def unpause(self, instance):
msg = _("unpause not supported for vmwareapi")
raise NotImplementedError(msg)
def suspend(self, instance):
"""Suspend the specified instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
# Only PoweredOn VMs can be suspended.
if pwr_state == "poweredOn":
LOG.debug("Suspending the VM", instance=instance)
suspend_task = self._session._call_method(self._session._get_vim(),
"SuspendVM_Task", vm_ref)
self._session._wait_for_task(suspend_task)
LOG.debug("Suspended the VM", instance=instance)
# Raise Exception if VM is poweredOff
elif pwr_state == "poweredOff":
reason = _("instance is powered off and cannot be suspended.")
raise exception.InstanceSuspendFailure(reason=reason)
else:
LOG.debug("VM was already in suspended state. So returning "
"without doing anything", instance=instance)
def resume(self, instance):
"""Resume the specified instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
if pwr_state.lower() == "suspended":
LOG.debug("Resuming the VM", instance=instance)
suspend_task = self._session._call_method(
self._session._get_vim(),
"PowerOnVM_Task", vm_ref)
self._session._wait_for_task(suspend_task)
LOG.debug("Resumed the VM", instance=instance)
else:
reason = _("instance is not in a suspended state")
raise exception.InstanceResumeFailure(reason=reason)
def rescue(self, context, instance, network_info, image_meta):
"""Rescue the specified instance.
- shutdown the instance VM.
- spawn a rescue VM (the vm name-label will be instance-N-rescue).
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
self.power_off(instance)
r_instance = copy.deepcopy(instance)
instance_name = r_instance.uuid + self._rescue_suffix
self.spawn(context, r_instance, image_meta,
None, None, network_info,
instance_name=instance_name,
power_on=False)
# Attach vmdk to the rescue VM
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
(vmdk_path, adapter_type,
disk_type) = vm_util.get_vmdk_path_and_adapter_type(
hardware_devices, uuid=instance.uuid)
rescue_vm_ref = vm_util.get_vm_ref_from_name(self._session,
instance_name)
self._volumeops.attach_disk_to_vm(
rescue_vm_ref, r_instance,
adapter_type, disk_type, vmdk_path)
vm_util.power_on_instance(self._session, r_instance,
vm_ref=rescue_vm_ref)
def unrescue(self, instance, power_on=True):
"""Unrescue the specified instance."""
# Get the original vmdk_path
vm_ref = vm_util.get_vm_ref(self._session, instance)
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
(vmdk_path, adapter_type,
disk_type) = vm_util.get_vmdk_path_and_adapter_type(
hardware_devices, uuid=instance.uuid)
r_instance = copy.deepcopy(instance)
instance_name = r_instance.uuid + self._rescue_suffix
# detach the original instance disk from the rescue disk
vm_rescue_ref = vm_util.get_vm_ref_from_name(self._session,
instance_name)
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_rescue_ref,
"VirtualMachine", "config.hardware.device")
device = vm_util.get_vmdk_volume_disk(hardware_devices, path=vmdk_path)
vm_util.power_off_instance(self._session, r_instance, vm_rescue_ref)
self._volumeops.detach_disk_from_vm(vm_rescue_ref, r_instance, device)
self._destroy_instance(r_instance, instance_name=instance_name)
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
def power_off(self, instance):
"""Power off the specified instance.
:param instance: nova.objects.instance.Instance
"""
vm_util.power_off_instance(self._session, instance)
def power_on(self, instance):
vm_util.power_on_instance(self._session, instance)
def _get_orig_vm_name_label(self, instance):
return instance.uuid + '-orig'
def _update_instance_progress(self, context, instance, step, total_steps):
"""Update instance progress percent to reflect current step number
"""
# Divide the action's workflow into discrete steps and "bump" the
# instance's progress field as each step is completed.
#
# For a first cut this should be fine, however, for large VM images,
# the clone disk step begins to dominate the equation. A
# better approximation would use the percentage of the VM image that
# has been streamed to the destination host.
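# Worked example: if RESIZE_TOTAL_STEPS were 4, completing step 1 would
# report round(float(1) / 4 * 100) = 25 percent.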
progress = round(float(step) / total_steps * 100)
instance_uuid = instance.uuid
LOG.debug("Updating instance '%(instance_uuid)s' progress to"
" %(progress)d",
{'instance_uuid': instance_uuid, 'progress': progress},
instance=instance)
instance.progress = progress
instance.save()
def migrate_disk_and_power_off(self, context, instance, dest,
flavor):
"""Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
# Checks if the migration needs a disk resize down.
if flavor['root_gb'] < instance['root_gb']:
reason = _("Unable to shrink disk.")
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
# 0. Zero out the progress to begin
self._update_instance_progress(context, instance,
step=0,
total_steps=RESIZE_TOTAL_STEPS)
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Read the host_ref for the destination. If this is None then the
# VC will decide on placement
host_ref = self._get_host_ref_from_name(dest)
# 1. Power off the instance
self.power_off(instance)
self._update_instance_progress(context, instance,
step=1,
total_steps=RESIZE_TOTAL_STEPS)
# 2. Disassociate the linked vsphere VM from the instance
vm_util.disassociate_vmref_from_instance(self._session, instance,
vm_ref,
suffix=self._migrate_suffix)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
ds_ref = ds_util.get_datastore(
self._session, self._cluster,
datastore_regex=self._datastore_regex).ref
dc_info = self.get_datacenter_ref_and_name(ds_ref)
# 3. Clone the VM for instance
vm_util.clone_vmref_for_instance(self._session, instance, vm_ref,
host_ref, ds_ref, dc_info.vmFolder)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
# Destroy the original VM. The vm_ref needs to be searched using
# instance.uuid + self._migrate_suffix as the identifier. The VM will
# not be found by its instanceUuid; instead it is found via the uuid
# buried in its extraConfig.
vm_ref = vm_util.search_vm_ref_by_identifier(self._session,
instance.uuid + self._migrate_suffix)
if vm_ref is None:
LOG.debug("instance not present", instance=instance)
return
try:
LOG.debug("Destroying the VM", instance=instance)
destroy_task = self._session._call_method(
self._session._get_vim(),
"Destroy_Task", vm_ref)
self._session._wait_for_task(destroy_task)
LOG.debug("Destroyed the VM", instance=instance)
except Exception as excep:
LOG.warn(_("In vmwareapi:vmops:confirm_migration, got this "
"exception while destroying the VM: %s"), excep)
def finish_revert_migration(self, context, instance, network_info,
block_device_info, power_on=True):
"""Finish reverting a resize."""
vm_util.associate_vmref_for_instance(self._session, instance,
suffix=self._migrate_suffix)
if power_on:
vm_util.power_on_instance(self._session, instance)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
if resize_instance:
client_factory = self._session._get_vim().client.factory
vm_resize_spec = vm_util.get_vm_resize_spec(client_factory,
instance)
vm_util.reconfigure_vm(self._session, vm_ref, vm_resize_spec)
# Resize the disk (if larger)
old_root_gb = instance.system_metadata['old_instance_type_root_gb']
if instance['root_gb'] > int(old_root_gb):
root_disk_in_kb = instance['root_gb'] * units.Mi
vmdk_path = vm_util.get_vmdk_path(self._session, vm_ref,
instance)
data_store_ref = ds_util.get_datastore(self._session,
self._cluster, datastore_regex=self._datastore_regex).ref
dc_info = self.get_datacenter_ref_and_name(data_store_ref)
self._extend_virtual_disk(instance, root_disk_in_kb, vmdk_path,
dc_info.ref)
# TODO(ericwb): add extend for ephemeral disk
# 4. Start VM
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
def live_migration(self, context, instance_ref, dest,
post_method, recover_method, block_migration=False):
"""Spawning live_migration operation for distributing high-load."""
vm_ref = vm_util.get_vm_ref(self._session, instance_ref)
host_ref = self._get_host_ref_from_name(dest)
if host_ref is None:
raise exception.HostNotFound(host=dest)
LOG.debug("Migrating VM to host %s", dest, instance=instance_ref)
try:
vm_migrate_task = self._session._call_method(
self._session._get_vim(),
"MigrateVM_Task", vm_ref,
host=host_ref,
priority="defaultPriority")
self._session._wait_for_task(vm_migrate_task)
except Exception:
with excutils.save_and_reraise_exception():
recover_method(context, instance_ref, dest, block_migration)
post_method(context, instance_ref, dest, block_migration)
LOG.debug("Migrated VM to host %s", dest, instance=instance_ref)
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances."""
ctxt = nova_context.get_admin_context()
instances_info = dict(instance_count=len(instances),
timeout=timeout)
if instances_info["instance_count"] > 0:
LOG.info(_("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds") % instances_info)
for instance in instances:
LOG.info(_("Automatically hard rebooting"), instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD")
def get_info(self, instance):
"""Return data about the VM instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.config.numCpu",
"summary.config.memorySizeMB",
"runtime.powerState"]
vm_props = self._session._call_method(vim_util,
"get_object_properties", None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(
self._session, vm_props)
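# summary.config.memorySizeMB is in MB; multiply by 1024 to report KB.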
max_mem = int(query['summary.config.memorySizeMB']) * 1024
return {'state': VMWARE_POWER_STATES[query['runtime.powerState']],
'max_mem': max_mem,
'mem': max_mem,
'num_cpu': int(query['summary.config.numCpu']),
'cpu_time': 0}
def _get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.config",
"summary.quickStats",
"summary.runtime"]
vm_props = self._session._call_method(vim_util,
"get_object_properties", None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(self._session,
vm_props)
data = {}
# All of the values received are objects; convert them to dictionaries.
for value in query.values():
prop_dict = vim_util.object_to_dict(value, list_depth=1)
data.update(prop_dict)
return data
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
data = self._get_diagnostics(instance)
# Add a namespace to all of the diagnostics
return dict([('vmware:' + k, v) for k, v in data.items()])
def get_instance_diagnostics(self, instance):
"""Return data about VM diagnostics."""
data = self._get_diagnostics(instance)
state = data.get('powerState')
if state:
state = power_state.STATE_MAP[VMWARE_POWER_STATES[state]]
uptime = data.get('uptimeSeconds', 0)
config_drive = configdrive.required_by(instance)
diags = diagnostics.Diagnostics(state=state,
driver='vmwareapi',
config_drive=config_drive,
hypervisor_os='esxi',
uptime=uptime)
diags.memory_details.maximum = data.get('memorySizeMB', 0)
diags.memory_details.used = data.get('guestMemoryUsage', 0)
# TODO(garyk): add in cpu, nic and disk stats
return diags
def _get_vnc_console_connection(self, instance):
"""Return connection info for a vnc console."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
opt_value = self._session._call_method(vim_util,
'get_dynamic_property',
vm_ref, 'VirtualMachine',
vm_util.VNC_CONFIG_KEY)
if opt_value:
port = int(opt_value.value)
else:
raise exception.ConsoleTypeUnavailable(console_type='vnc')
return {'port': port,
'internal_access_path': None}
@staticmethod
def _get_machine_id_str(network_info):
machine_id_str = ''
for vif in network_info:
# TODO(vish): add support for dns2
# TODO(sateesh): add support for injection of ipv6 configuration
network = vif['network']
ip_v4 = netmask_v4 = gateway_v4 = broadcast_v4 = dns = None
subnets_v4 = [s for s in network['subnets'] if s['version'] == 4]
if len(subnets_v4) > 0:
if len(subnets_v4[0]['ips']) > 0:
ip_v4 = subnets_v4[0]['ips'][0]
if len(subnets_v4[0]['dns']) > 0:
dns = subnets_v4[0]['dns'][0]['address']
netmask_v4 = str(subnets_v4[0].as_netaddr().netmask)
gateway_v4 = subnets_v4[0]['gateway']['address']
broadcast_v4 = str(subnets_v4[0].as_netaddr().broadcast)
interface_str = ";".join([vif['address'],
ip_v4 and ip_v4['address'] or '',
netmask_v4 or '',
gateway_v4 or '',
broadcast_v4 or '',
dns or ''])
machine_id_str = machine_id_str + interface_str + '#'
return machine_id_str
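# Hypothetical example of the resulting string for a single interface
# (fields joined by ';', each interface terminated by '#'):
#   "fa:16:3e:aa:bb:cc;192.168.1.10;255.255.255.0;192.168.1.1;192.168.1.255;8.8.8.8#"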
def _set_machine_id(self, client_factory, instance, network_info):
"""Set the machine id of the VM for guest tools to pick up
and reconfigure the network interfaces.
"""
vm_ref = vm_util.get_vm_ref(self._session, instance)
machine_id_change_spec = vm_util.get_machine_id_change_spec(
client_factory,
self._get_machine_id_str(network_info))
LOG.debug("Reconfiguring VM instance to set the machine id",
instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, machine_id_change_spec)
LOG.debug("Reconfigured VM instance to set the machine id",
instance=instance)
@utils.synchronized('vmware.get_and_set_vnc_port')
def _get_and_set_vnc_config(self, client_factory, instance):
"""Set the vnc configuration of the VM."""
port = vm_util.get_vnc_port(self._session)
vm_ref = vm_util.get_vm_ref(self._session, instance)
vnc_config_spec = vm_util.get_vnc_config_spec(
client_factory, port)
LOG.debug("Reconfiguring VM instance to enable vnc on "
"port - %(port)s", {'port': port},
instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, vnc_config_spec)
LOG.debug("Reconfigured VM instance to enable vnc on "
"port - %(port)s", {'port': port},
instance=instance)
def _get_ds_browser(self, ds_ref):
ds_browser = self._datastore_browser_mapping.get(ds_ref.value)
if not ds_browser:
ds_browser = self._session._call_method(
vim_util, "get_dynamic_property", ds_ref, "Datastore",
"browser")
self._datastore_browser_mapping[ds_ref.value] = ds_browser
return ds_browser
def _get_host_ref_from_name(self, host_name):
"""Get reference to the host with the name specified."""
host_objs = self._session._call_method(vim_util, "get_objects",
"HostSystem", ["name"])
vm_util._cancel_retrieve_if_necessary(self._session, host_objs)
for host in host_objs:
if hasattr(host, 'propSet'):
if host.propSet[0].val == host_name:
return host.obj
return None
def _get_vmfolder_ref(self):
"""Get the Vm folder ref from the datacenter."""
dc_objs = self._session._call_method(vim_util, "get_objects",
"Datacenter", ["vmFolder"])
vm_util._cancel_retrieve_if_necessary(self._session, dc_objs)
# There is only one default datacenter in a standalone ESX host
vm_folder_ref = dc_objs.objects[0].propSet[0].val
return vm_folder_ref
def _create_folder_if_missing(self, ds_name, ds_ref, folder):
"""Create a folder if it does not exist.
Currently there are two folders that are required on the datastore:
- base folder - the folder to store cached images
- temp folder - the folder used for snapshot management and
image uploading
This method manages those folders and ensures that they are created
if they are missing. The ds_util method mkdir is used to create the
folder; if it throws a 'FileAlreadyExistsException', the folder
already exists on the datastore.
"""
path = ds_util.DatastorePath(ds_name, folder)
dc_info = self.get_datacenter_ref_and_name(ds_ref)
try:
ds_util.mkdir(self._session, path, dc_info.ref)
LOG.debug("Folder %s created.", path)
except vexc.FileAlreadyExistsException:
# NOTE(hartsocks): if the folder already exists, that
# just means the folder was prepped by another process.
pass
def check_cache_folder(self, ds_name, ds_ref):
"""Check that the cache folder exists."""
self._create_folder_if_missing(ds_name, ds_ref, self._base_folder)
def check_temp_folder(self, ds_name, ds_ref):
"""Check that the temp folder exists."""
self._create_folder_if_missing(ds_name, ds_ref, self._tmp_folder)
def _check_if_folder_file_exists(self, ds_browser, ds_ref, ds_name,
folder_name, file_name):
# Ensure that the cache folder exists
self.check_cache_folder(ds_name, ds_ref)
# Check if the file exists or not.
folder_ds_path = ds_util.DatastorePath(ds_name, folder_name)
return ds_util.file_exists(
self._session, ds_browser, folder_ds_path, file_name)
def inject_network_info(self, instance, network_info):
"""inject network info for specified instance."""
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
client_factory = self._session._get_vim().client.factory
self._set_machine_id(client_factory, instance, network_info)
def manage_image_cache(self, context, instances):
if not CONF.remove_unused_base_images:
LOG.debug("Image aging disabled. Aging will not be done.")
return
datastores = ds_util.get_available_datastores(self._session,
self._cluster,
self._datastore_regex)
datastores_info = []
for ds in datastores:
dc_info = self.get_datacenter_ref_and_name(ds.ref)
datastores_info.append((ds, dc_info))
self._imagecache.update(context, instances, datastores_info)
def _get_valid_vms_from_retrieve_result(self, retrieve_result):
"""Returns list of valid vms from RetrieveResult object."""
lst_vm_names = []
while retrieve_result:
token = vm_util._get_token(retrieve_result)
for vm in retrieve_result.objects:
vm_name = None
conn_state = None
for prop in vm.propSet:
if prop.name == "name":
vm_name = prop.val
elif prop.name == "runtime.connectionState":
conn_state = prop.val
# Ignore orphaned or inaccessible VMs
if conn_state not in ["orphaned", "inaccessible"]:
lst_vm_names.append(vm_name)
if token:
retrieve_result = self._session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
break
return lst_vm_names
def instance_exists(self, instance):
try:
vm_util.get_vm_ref(self._session, instance)
return True
except exception.InstanceNotFound:
return False
def attach_interface(self, instance, image_meta, vif):
"""Attach an interface to the instance."""
vif_model = image_meta.get("hw_vif_model",
constants.DEFAULT_VIF_MODEL)
vif_model = vm_util.convert_vif_model(vif_model)
vif_info = vmwarevif.get_vif_dict(self._session, self._cluster,
vif_model, utils.is_neutron(), vif)
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Ensure that there is not a race with the port index management
with lockutils.lock(instance.uuid,
lock_file_prefix='nova-vmware-hot-plug'):
port_index = vm_util.get_attach_port_index(self._session, vm_ref)
client_factory = self._session._get_vim().client.factory
attach_config_spec = vm_util.get_network_attach_config_spec(
client_factory, vif_info, port_index)
LOG.debug("Reconfiguring VM to attach interface",
instance=instance)
try:
vm_util.reconfigure_vm(self._session, vm_ref,
attach_config_spec)
except Exception as e:
LOG.error(_LE('Attaching network adapter failed. Exception: '
' %s'),
e, instance=instance)
raise exception.InterfaceAttachFailed(
instance_uuid=instance['uuid'])
LOG.debug("Reconfigured VM to attach interface", instance=instance)
def detach_interface(self, instance, vif):
"""Detach an interface from the instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
# Ensure that there is not a race with the port index management
with lockutils.lock(instance.uuid,
lock_file_prefix='nova-vmware-hot-plug'):
port_index = vm_util.get_vm_detach_port_index(self._session,
vm_ref,
vif['id'])
if port_index is None:
msg = _("No device with interface-id %s exists on "
"VM") % vif['id']
raise exception.NotFound(msg)
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
device = vmwarevif.get_network_device(hardware_devices,
vif['address'])
if device is None:
msg = _("No device with MAC address %s exists on the "
"VM") % vif['address']
raise exception.NotFound(msg)
client_factory = self._session._get_vim().client.factory
detach_config_spec = vm_util.get_network_detach_config_spec(
client_factory, device, port_index)
LOG.debug("Reconfiguring VM to detach interface",
instance=instance)
try:
vm_util.reconfigure_vm(self._session, vm_ref,
detach_config_spec)
except Exception as e:
LOG.error(_LE('Detaching network adapter failed. Exception: '
'%s'),
e, instance=instance)
raise exception.InterfaceDetachFailed(
instance_uuid=instance['uuid'])
LOG.debug("Reconfigured VM to detach interface", instance=instance)
def _use_disk_image_as_full_clone(self, vm_ref, vi):
"""Uses cached image disk by copying it into the VM directory."""
instance_folder = vi.instance_name
root_disk_name = "%s.vmdk" % vi.instance_name
root_disk_ds_loc = vi.datastore.build_path(instance_folder,
root_disk_name)
vm_util.copy_virtual_disk(
self._session,
vi.dc_info.ref,
str(vi.cache_image_path),
str(root_disk_ds_loc))
self._extend_if_required(
vi.dc_info, vi.ii, vi.instance, str(root_disk_ds_loc))
self._volumeops.attach_disk_to_vm(
vm_ref, vi.instance,
vi.ii.adapter_type, vi.ii.disk_type,
str(root_disk_ds_loc),
vi.root_gb * units.Mi, False)
def _sized_image_exists(self, sized_disk_ds_loc, ds_ref):
ds_browser = self._get_ds_browser(ds_ref)
return ds_util.file_exists(
self._session, ds_browser, sized_disk_ds_loc.parent,
sized_disk_ds_loc.basename)
def _use_disk_image_as_linked_clone(self, vm_ref, vi):
"""Uses cached image as parent of a COW child in the VM directory."""
sized_image_disk_name = "%s.vmdk" % vi.ii.image_id
if vi.root_gb > 0:
sized_image_disk_name = "%s.%s.vmdk" % (vi.ii.image_id, vi.root_gb)
sized_disk_ds_loc = vi.cache_image_folder.join(sized_image_disk_name)
# Ensure only a single thread extends the image at once.
# We do this by taking a lock on the name of the extended
# image. This allows multiple threads to create resized
# copies simultaneously, as long as they are different
# sizes. Threads attempting to create the same resized copy
# will be serialized, with only the first actually creating
# the copy.
#
# Note that the object is in a per-nova cache directory,
# so inter-nova locking is not a concern. Consequently we
# can safely use simple thread locks.
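# For example, two threads resizing image X to 20Gb and 40Gb lock the
# different names "X.20.vmdk" and "X.40.vmdk" and can copy concurrently.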
with lockutils.lock(str(sized_disk_ds_loc),
lock_file_prefix='nova-vmware-image'):
if not self._sized_image_exists(sized_disk_ds_loc,
vi.datastore.ref):
LOG.debug("Copying root disk of size %sGb", vi.root_gb)
try:
vm_util.copy_virtual_disk(
self._session,
vi.dc_info.ref,
str(vi.cache_image_path),
str(sized_disk_ds_loc))
except Exception as e:
LOG.warning(_("Root disk file creation "
"failed - %s"), e)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to copy cached '
'image %(source)s to '
'%(dest)s for resize: '
'%(error)s'),
{'source': vi.cache_image_path,
'dest': sized_disk_ds_loc,
'error': e.message})
try:
ds_util.file_delete(self._session,
sized_disk_ds_loc,
vi.dc_info.ref)
except vexc.FileNotFoundException:
# File was never created: cleanup not
# required
pass
# Resize the copy to the appropriate size. No need
# for cleanup here, as _extend_virtual_disk
# already does it
self._extend_if_required(
vi.dc_info, vi.ii, vi.instance, str(sized_disk_ds_loc))
# Associate the sized image disk to the VM by attaching to the VM a
# COW child of said disk.
self._volumeops.attach_disk_to_vm(
vm_ref, vi.instance,
vi.ii.adapter_type, vi.ii.disk_type,
str(sized_disk_ds_loc),
vi.root_gb * units.Mi, vi.ii.linked_clone)
def _use_iso_image(self, vm_ref, vi):
"""Uses cached image as a bootable virtual cdrom."""
self._attach_cdrom_to_vm(
vm_ref, vi.instance, vi.datastore.ref,
str(vi.cache_image_path))
# Optionally create and attach blank disk
if vi.root_gb > 0:
instance_folder = vi.instance_name
root_disk_name = "%s.vmdk" % vi.instance_name
root_disk_ds_loc = vi.datastore.build_path(instance_folder,
root_disk_name)
# It is pointless to COW a blank disk
linked_clone = False
vm_util.create_virtual_disk(
self._session, vi.dc_info.ref,
vi.ii.adapter_type,
vi.ii.disk_type,
str(root_disk_ds_loc),
vi.root_gb * units.Mi)
self._volumeops.attach_disk_to_vm(
vm_ref, vi.instance,
vi.ii.adapter_type, vi.ii.disk_type,
str(root_disk_ds_loc),
vi.root_gb * units.Mi, linked_clone)
def _update_datacenter_cache_from_objects(self, dcs):
"""Updates the datastore/datacenter cache."""
while dcs:
token = vm_util._get_token(dcs)
for dco in dcs.objects:
dc_ref = dco.obj
ds_refs = []
prop_dict = vm_util.propset_dict(dco.propSet)
name = prop_dict.get('name')
vmFolder = prop_dict.get('vmFolder')
datastore_refs = prop_dict.get('datastore')
if datastore_refs:
datastore_refs = datastore_refs.ManagedObjectReference
for ds in datastore_refs:
ds_refs.append(ds.value)
else:
LOG.debug("Datacenter %s doesn't have any datastore "
"associated with it, ignoring it", name)
for ds_ref in ds_refs:
self._datastore_dc_mapping[ds_ref] = DcInfo(ref=dc_ref,
name=name, vmFolder=vmFolder)
if token:
dcs = self._session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
break
def get_datacenter_ref_and_name(self, ds_ref):
"""Get the datacenter name and the reference."""
dc_info = self._datastore_dc_mapping.get(ds_ref.value)
if not dc_info:
dcs = self._session._call_method(vim_util, "get_objects",
"Datacenter", ["name", "datastore", "vmFolder"])
self._update_datacenter_cache_from_objects(dcs)
dc_info = self._datastore_dc_mapping.get(ds_ref.value)
return dc_info
def list_instances(self):
"""Lists the VM instances that are registered with vCenter cluster."""
properties = ['name', 'runtime.connectionState']
LOG.debug("Getting list of instances from cluster %s",
self._cluster)
vms = []
root_res_pool = self._session._call_method(
vim_util, "get_dynamic_property", self._cluster,
'ClusterComputeResource', 'resourcePool')
if root_res_pool:
vms = self._session._call_method(
vim_util, 'get_inner_objects', root_res_pool, 'vm',
'VirtualMachine', properties)
lst_vm_names = self._get_valid_vms_from_retrieve_result(vms)
LOG.debug("Got total of %s instances", str(len(lst_vm_names)))
return lst_vm_names
def get_vnc_console(self, instance):
"""Return connection info for a vnc console using vCenter logic."""
# vCenter does not run virtual machines and does not run
# a VNC proxy. Instead, you need to tell OpenStack to talk
# directly to the ESX host running the VM you are attempting
# to connect to via VNC.
vnc_console = self._get_vnc_console_connection(instance)
host_name = vm_util.get_host_name_for_vm(
self._session,
instance)
vnc_console['host'] = host_name
# NOTE: VM can move hosts in some situations. Debug for admins.
LOG.debug("VM %(uuid)s is currently on host %(host_name)s",
{'uuid': instance.name, 'host_name': host_name},
instance=instance)
return ctype.ConsoleVNC(**vnc_console)
|
from pathlib import Path
import json
# Project base directory
base_dir = Path().resolve()
# Configuration
with open(base_dir / 'config.json') as config_file:
CONFIG = json.load(config_file)
def output_dir(config=CONFIG):
"""
Return string for default output directory path.
"""
return config['output_dir']['path']
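# A minimal usage sketch, assuming config.json contains
# {"output_dir": {"path": "./out"}}:
#
#   print(output_dir())  # -> './out'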
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from scipy.ndimage import map_coordinates
from scipy.ndimage.interpolation import shift
from scipy.optimize import curve_fit, minimize
def reproject_image_into_polar(data, origin=None, Jacobian=False,
dr=1, dt=None):
"""
Reprojects a 2D numpy array (``data``) into a polar coordinate system.
"origin" is a tuple of (x0, y0) relative to the bottom-left image corner,
and defaults to the center of the image.
Parameters
----------
data : 2D np.array
origin : tuple
The coordinate of the image center, relative to bottom-left
Jacobian : boolean
Include ``r`` intensity scaling in the coordinate transform.
This should be included to account for the changing pixel size that
occurs during the transform.
dr : float
Radial coordinate spacing for the grid interpolation
tests show that there is not much point in going below 0.5
dt : float
Angular coordinate spacing (in radians)
if ``dt=None``, dt will be set such that the number of theta values
is equal to the maximum value between the height or the width of
the image.
Returns
-------
output : 2D np.array
The polar image (r, theta)
r_grid : 2D np.array
meshgrid of radial coordinates
theta_grid : 2D np.array
meshgrid of theta coordinates
Notes
-----
Adapted from:
http://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system
"""
# the bottom-left coordinate system requires the numpy image to be flipped with np.flipud
data = np.flipud(data)
ny, nx = data.shape[:2]
if origin is None:
origin = (nx//2, ny//2)
# Determine what the min and max r and theta coords will be...
x, y = index_coords(data, origin=origin) # (x,y) coordinates of each pixel
r, theta = cart2polar(x, y) # convert (x,y) -> (r,θ), note θ=0 is vertical
nr = int(np.ceil((r.max()-r.min())/dr))
if dt is None:
nt = max(nx, ny)
else:
# dt in radians
nt = int(np.ceil((theta.max()-theta.min())/dt))
# Make a regular (in polar space) grid based on the min and max r & theta
r_i = np.linspace(r.min(), r.max(), nr, endpoint=False)
theta_i = np.linspace(theta.min(), theta.max(), nt, endpoint=False)
theta_grid, r_grid = np.meshgrid(theta_i, r_i)
# Project the r and theta grid back into pixel coordinates
X, Y = polar2cart(r_grid, theta_grid)
X += origin[0] # We need to shift the origin
Y += origin[1] # back to the bottom-left corner...
xi, yi = X.flatten(), Y.flatten()
coords = np.vstack((yi, xi)) # (map_coordinates requires a 2xn array)
zi = map_coordinates(data, coords)
output = zi.reshape((nr, nt))
if Jacobian:
output = output*r_i[:, np.newaxis]
return output, r_grid, theta_grid
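# A minimal usage sketch (hypothetical data): reproject an image and
# average over theta to obtain a radial intensity profile I(r).
#
#   import numpy as np
#   image = np.random.random((256, 256))
#   polar, r_grid, theta_grid = reproject_image_into_polar(image, Jacobian=True)
#   radial_profile = polar.mean(axis=1)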
def index_coords(data, origin=None):
"""
Create x & y coordinates for the indices in a numpy array
Parameters
----------
data : numpy array
2D data
origin : (x,y) tuple
defaults to the center of the image. Specify origin=(0,0)
to set the origin to the *bottom-left* corner of the image.
Returns
-------
x, y : arrays
"""
ny, nx = data.shape[:2]
if origin is None:
origin_x, origin_y = nx//2, ny//2
else:
origin_x, origin_y = origin
x, y = np.meshgrid(np.arange(float(nx)), np.arange(float(ny)))
x -= origin_x
y -= origin_y
return x, y
def cart2polar(x, y):
"""
Transform Cartesian coordinates to polar
Parameters
----------
x, y : floats or arrays
Cartesian coordinates
Returns
-------
r, theta : floats or arrays
Polar coordinates
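
Examples
--------
A quick numeric check of the vertical-axis convention (illustrative
values only):

>>> r, theta = cart2polar(0.0, 1.0)
>>> float(r), float(theta)
(1.0, 0.0)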
"""
r = np.sqrt(x**2 + y**2)
theta = np.arctan2(x, y) # θ referenced to vertical
return r, theta
def polar2cart(r, theta):
"""
Transform polar coordinates to Cartesian
Parameters
----------
r, theta : floats or arrays
Polar coordinates
Returns
-------
x, y : floats or arrays
Cartesian coordinates
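
Examples
--------
A quick numeric check (theta = 0 points along the vertical +y axis;
illustrative values only):

>>> x, y = polar2cart(1.0, 0.0)
>>> float(x), float(y)
(0.0, 1.0)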
"""
y = r * np.cos(theta) # θ referenced to vertical
x = r * np.sin(theta)
return x, y
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
class okcoinusd (Exchange):
def describe(self):
return self.deep_extend(super(okcoinusd, self).describe(), {
'id': 'okcoinusd',
'name': 'OKCoin USD',
'countries': ['CN', 'US'],
'version': 'v1',
'rateLimit': 1000, # up to 3000 requests per 5 minutes ≈ 600 requests per minute ≈ 10 requests per second ≈ 100 ms
'has': {
'CORS': False,
'fetchOHLCV': True,
'fetchOrder': True,
'fetchOrders': False,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'withdraw': True,
'futures': False,
},
'extension': '.do', # appended to endpoint URL
'timeframes': {
'1m': '1min',
'3m': '3min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '1hour',
'2h': '2hour',
'4h': '4hour',
'6h': '6hour',
'12h': '12hour',
'1d': '1day',
'3d': '3day',
'1w': '1week',
},
'api': {
'web': {
'get': [
'futures/pc/market/marketOverview', # todo: merge in fetchMarkets
'spot/markets/index-tickers', # todo: add fetchTickers
'spot/markets/currencies',
'spot/markets/products',
'spot/markets/tickers',
'spot/user-level',
],
},
'public': {
'get': [
'depth',
'exchange_rate',
'future_depth',
'future_estimated_price',
'future_hold_amount',
'future_index',
'future_kline',
'future_price_limit',
'future_ticker',
'future_trades',
'kline',
'otcs',
'ticker',
'tickers', # todo: add fetchTickers
'trades',
],
},
'private': {
'post': [
'account_records',
'batch_trade',
'borrow_money',
'borrow_order_info',
'borrows_info',
'cancel_borrow',
'cancel_order',
'cancel_otc_order',
'cancel_withdraw',
'funds_transfer',
'future_batch_trade',
'future_cancel',
'future_devolve',
'future_explosive',
'future_order_info',
'future_orders_info',
'future_position',
'future_position_4fix',
'future_trade',
'future_trades_history',
'future_userinfo',
'future_userinfo_4fix',
'lend_depth',
'order_fee',
'order_history',
'order_info',
'orders_info',
'otc_order_history',
'otc_order_info',
'repayment',
'submit_otc_order',
'trade',
'trade_history',
'trade_otc_order',
'wallet_info',
'withdraw',
'withdraw_info',
'unrepayments_info',
'userinfo',
],
},
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766791-89ffb502-5ee5-11e7-8a5b-c5950b68ac65.jpg',
'api': {
'web': 'https://www.okcoin.com/v2',
'public': 'https://www.okcoin.com/api',
'private': 'https://www.okcoin.com',
},
'www': 'https://www.okcoin.com',
'doc': [
'https://www.okcoin.com/docs/en/',
'https://www.npmjs.com/package/okcoin.com',
],
},
'fees': {
'trading': {
'taker': 0.002,
'maker': 0.002,
},
},
'exceptions': {
# see https://github.com/okcoin-okex/API-docs-OKEx.com/blob/master/API-For-Spot-EN/Error%20Code%20For%20Spot.md
'10000': ExchangeError, # "Required field, can not be null"
'10001': DDoSProtection, # "Request frequency too high to exceed the limit allowed"
'10005': AuthenticationError, # "'SecretKey' does not exist"
'10006': AuthenticationError, # "'Api_key' does not exist"
'10007': AuthenticationError, # "Signature does not match"
'1002': InsufficientFunds, # "The transaction amount exceed the balance"
'1003': InvalidOrder, # "The transaction amount is less than the minimum requirement"
'1004': InvalidOrder, # "The transaction amount is less than 0"
'1013': InvalidOrder, # no contract type(PR-1101)
'1027': InvalidOrder, # createLimitBuyOrder(symbol, 0, 0): Incorrect parameter may exceeded limits
'1050': InvalidOrder, # returned when trying to cancel an order that was filled or canceled previously
'1217': InvalidOrder, # "Order was sent at ±5% of the current market price. Please resend"
'10014': InvalidOrder, # "Order price must be between 0 and 1,000,000"
'1009': OrderNotFound, # for spot markets, cancelling closed order
'1019': OrderNotFound, # order closed?("Undo order failed")
'1051': OrderNotFound, # for spot markets, cancelling "just closed" order
'10009': OrderNotFound, # for spot markets, "Order does not exist"
'20015': OrderNotFound, # for future markets
'10008': ExchangeError, # Illegal URL parameter
# todo: sort out below
# 10000 Required parameter is empty
# 10001 Request frequency too high to exceed the limit allowed
# 10002 Authentication failure
# 10002 System error
# 10003 This connection has requested other user data
# 10004 Request failed
# 10005 api_key or sign is invalid, 'SecretKey' does not exist
# 10006 'Api_key' does not exist
# 10007 Signature does not match
# 10008 Illegal parameter, Parameter error
# 10009 Order does not exist
# 10010 Insufficient funds
# 10011 Amount too low
# 10012 Only btc_usd ltc_usd supported
# 10013 Only support https request
# 10014 Order price must be between 0 and 1,000,000
# 10015 Order price differs from current market price too much / Channel subscription temporally not available
# 10016 Insufficient coins balance
# 10017 API authorization error / WebSocket authorization error
# 10018 borrow amount less than lower limit [usd:100,btc:0.1,ltc:1]
# 10019 loan agreement not checked
# 1002 The transaction amount exceed the balance
# 10020 rate cannot exceed 1%
# 10021 rate cannot less than 0.01%
# 10023 fail to get latest ticker
# 10024 balance not sufficient
# 10025 quota is full, cannot borrow temporarily
# 10026 Loan(including reserved loan) and margin cannot be withdrawn
# 10027 Cannot withdraw within 24 hrs of authentication information modification
# 10028 Withdrawal amount exceeds daily limit
# 10029 Account has unpaid loan, please cancel/pay off the loan before withdraw
# 1003 The transaction amount is less than the minimum requirement
# 10031 Deposits can only be withdrawn after 6 confirmations
# 10032 Please enabled phone/google authenticator
# 10033 Fee higher than maximum network transaction fee
# 10034 Fee lower than minimum network transaction fee
# 10035 Insufficient BTC/LTC
# 10036 Withdrawal amount too low
# 10037 Trade password not set
# 1004 The transaction amount is less than 0
# 10040 Withdrawal cancellation fails
# 10041 Withdrawal address not exsit or approved
# 10042 Admin password error
# 10043 Account equity error, withdrawal failure
# 10044 fail to cancel borrowing order
# 10047 self function is disabled for sub-account
# 10048 withdrawal information does not exist
# 10049 User can not have more than 50 unfilled small orders(amount<0.15BTC)
# 10050 can't cancel more than once
# 10051 order completed transaction
# 10052 not allowed to withdraw
# 10064 after a USD deposit, that portion of assets will not be withdrawable for the next 48 hours
# 1007 No trading market information
# 1008 No latest market information
# 1009 No order
# 1010 Different user of the cancelled order and the original order
# 10100 User account frozen
# 10101 order type is wrong
# 10102 incorrect ID
# 10103 the private otc order's key incorrect
# 10106 API key domain not matched
# 1011 No documented user
# 1013 No order type
# 1014 No login
# 1015 No market depth information
# 1017 Date error
# 1018 Order failed
# 1019 Undo order failed
# 10216 Non-available API / non-public API
# 1024 Currency does not exist
# 1025 No chart type
# 1026 No base currency quantity
# 1027 Incorrect parameter may exceeded limits
# 1028 Reserved decimal failed
# 1029 Preparing
# 1030 Account has margin and futures, transactions can not be processed
# 1031 Insufficient Transferring Balance
# 1032 Transferring Not Allowed
# 1035 Password incorrect
# 1036 Google Verification code Invalid
# 1037 Google Verification code incorrect
# 1038 Google Verification replicated
# 1039 Message Verification Input exceed the limit
# 1040 Message Verification invalid
# 1041 Message Verification incorrect
# 1042 Wrong Google Verification Input exceed the limit
# 1043 Login password cannot be same as the trading password
# 1044 Old password incorrect
# 1045 2nd Verification Needed
# 1046 Please input old password
# 1048 Account Blocked
# 1050 Orders have been withdrawn or withdrawn
# 1051 Order completed
# 1201 Account Deleted at 00: 00
# 1202 Account Not Exist
# 1203 Insufficient Balance
# 1204 Invalid currency
# 1205 Invalid Account
# 1206 Cash Withdrawal Blocked
# 1207 Transfer Not Support
# 1208 No designated account
# 1209 Invalid api
# 1216 Market order temporarily suspended. Please send limit order
# 1217 Order was sent at ±5% of the current market price. Please resend
# 1218 Place order failed. Please try again later
# 20001 User does not exist
# 20002 Account frozen
# 20003 Account frozen due to forced liquidation
# 20004 Contract account frozen
# 20005 User contract account does not exist
# 20006 Required field missing
# 20007 Illegal parameter
# 20008 Contract account balance is too low
# 20009 Contract status error
# 20010 Risk rate ratio does not exist
# 20011 Risk rate lower than 90%/80% before opening BTC position with 10x/20x leverage. or risk rate lower than 80%/60% before opening LTC position with 10x/20x leverage
# 20012 Risk rate lower than 90%/80% after opening BTC position with 10x/20x leverage. or risk rate lower than 80%/60% after opening LTC position with 10x/20x leverage
# 20013 Temporally no counter party price
# 20014 System error
# 20015 Order does not exist
# 20016 Close amount bigger than your open positions, liquidation quantity bigger than holding
# 20017 Not authorized/illegal operation/illegal order ID
# 20018 Order price cannot be more than 103-105% or less than 95-97% of the previous minute price
# 20019 IP restricted from accessing the resource
# 20020 Secret key does not exist
# 20021 Index information does not exist
# 20022 Wrong API interface(Cross margin mode shall call cross margin API, fixed margin mode shall call fixed margin API)
# 20023 Account in fixed-margin mode
# 20024 Signature does not match
# 20025 Leverage rate error
# 20026 API Permission Error
# 20027 no transaction record
# 20028 no such contract
# 20029 Amount is large than available funds
# 20030 Account still has debts
# 20038 Due to regulation, this function is not available in the country/region you currently reside in.
# 20049 Request frequency too high
# 20100 request time out
# 20101 the format of data is error
# 20102 invalid login
# 20103 event type error
# 20104 subscription type error
# 20107 JSON format error
# 20115 The quote is not match
# 20116 Param not match
# 21020 Contracts are being delivered, orders cannot be placed
# 21021 Contracts are being settled, contracts cannot be placed
},
'options': {
'marketBuyPrice': False,
'defaultContractType': 'this_week', # next_week, quarter
'warnOnFetchOHLCVLimitArgument': True,
'fiats': ['USD', 'CNY'],
'futures': {
'BCH': True,
'BTC': True,
'BTG': True,
'EOS': True,
'ETC': True,
'ETH': True,
'LTC': True,
'NEO': True,
'QTUM': True,
'USDT': True,
'XRP': True,
},
},
})
async def fetch_markets(self, params={}):
response = await self.webGetSpotMarketsProducts()
markets = response['data']
result = []
for i in range(0, len(markets)):
id = markets[i]['symbol']
baseId, quoteId = id.split('_')
baseIdUppercase = baseId.upper()
quoteIdUppercase = quoteId.upper()
base = self.common_currency_code(baseIdUppercase)
quote = self.common_currency_code(quoteIdUppercase)
symbol = base + '/' + quote
precision = {
'amount': markets[i]['maxSizeDigit'],
'price': markets[i]['maxPriceDigit'],
}
minAmount = markets[i]['minTradeSize']
minPrice = math.pow(10, -precision['price'])
active = (markets[i]['online'] != 0)
baseNumericId = markets[i]['baseCurrency']
quoteNumericId = markets[i]['quoteCurrency']
market = self.extend(self.fees['trading'], {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'baseNumericId': baseNumericId,
'quoteNumericId': quoteNumericId,
'info': markets[i],
'type': 'spot',
'spot': True,
'future': False,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': minPrice,
'max': None,
},
'cost': {
'min': minAmount * minPrice,
'max': None,
},
},
})
result.append(market)
if (self.has['futures']) and(market['base'] in list(self.options['futures'].keys())):
fiats = self.options['fiats']
for j in range(0, len(fiats)):
fiat = fiats[j]
lowercaseFiat = fiat.lower()
result.append(self.extend(market, {
'quote': fiat,
'symbol': market['base'] + '/' + fiat,
'id': market['base'].lower() + '_' + lowercaseFiat,
'quoteId': lowercaseFiat,
'type': 'future',
'spot': False,
'future': True,
}))
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = 'publicGet'
request = {
'symbol': market['id'],
}
if limit is not None:
request['size'] = limit
if market['future']:
method += 'Future'
request['contract_type'] = self.options['defaultContractType']  # this_week, next_week, quarter
method += 'Depth'
orderbook = await getattr(self, method)(self.extend(request, params))
return self.parse_order_book(orderbook)
def parse_ticker(self, ticker, market=None):
#
# { buy: "48.777300",
# change: "-1.244500",
# changePercentage: "-2.47%",
# close: "49.064000",
# createdDate: 1531704852254,
# currencyId: 527,
# dayHigh: "51.012500",
# dayLow: "48.124200",
# high: "51.012500",
# inflows: "0",
# last: "49.064000",
# low: "48.124200",
# marketFrom: 627,
# name: {},
# open: "50.308500",
# outflows: "0",
# productId: 527,
# sell: "49.064000",
# symbol: "zec_okb",
# volume: "1049.092535" }
#
timestamp = self.safe_integer_2(ticker, 'timestamp', 'createdDate')
symbol = None
if market is None:
if 'symbol' in ticker:
marketId = ticker['symbol']
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
else:
baseId, quoteId = ticker['symbol'].split('_')
base = baseId.upper()
quote = quoteId.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
if market is not None:
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
open = self.safe_float(ticker, 'open')
change = self.safe_float(ticker, 'change')
percentage = self.safe_float(ticker, 'changePercentage')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': None,
'baseVolume': self.safe_float_2(ticker, 'vol', 'volume'),
'quoteVolume': None,
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
method = 'publicGet'
request = {
'symbol': market['id'],
}
if market['future']:
method += 'Future'
request['contract_type'] = self.options['defaultContractType']  # this_week, next_week, quarter
method += 'Ticker'
response = await getattr(self, method)(self.extend(request, params))
ticker = self.safe_value(response, 'ticker')
if ticker is None:
raise ExchangeError(self.id + ' fetchTicker returned an empty response: ' + self.json(response))
timestamp = self.safe_integer(response, 'date')
if timestamp is not None:
timestamp *= 1000
ticker = self.extend(ticker, {'timestamp': timestamp})
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market=None):
symbol = None
if market:
symbol = market['symbol']
return {
'info': trade,
'timestamp': trade['date_ms'],
'datetime': self.iso8601(trade['date_ms']),
'symbol': symbol,
'id': str(trade['tid']),
'order': None,
'type': None,
'side': trade['type'],
'price': self.safe_float(trade, 'price'),
'amount': self.safe_float(trade, 'amount'),
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = 'publicGet'
request = {
'symbol': market['id'],
}
if market['future']:
method += 'Future'
request['contract_type'] = self.options['defaultContractType']  # this_week, next_week, quarter
method += 'Trades'
response = await getattr(self, method)(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
numElements = len(ohlcv)
volumeIndex = 6 if (numElements > 6) else 5
return [
ohlcv[0], # timestamp
float(ohlcv[1]), # Open
float(ohlcv[2]), # High
float(ohlcv[3]), # Low
float(ohlcv[4]), # Close
# float(ohlcv[5]), # quote volume
# float(ohlcv[6]), # base volume
float(ohlcv[volumeIndex]), # okex will return base volume in the 7th element for future markets
]
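# Illustrative (hypothetical) row shapes showing why volumeIndex differs:
#   spot:    [ts, open, high, low, close, volume]             -> index 5
#   futures: [ts, open, high, low, close, quoteVol, baseVol]  -> index 6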
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = 'publicGet'
request = {
'symbol': market['id'],
'type': self.timeframes[timeframe],
}
if market['future']:
method += 'Future'
request['contract_type'] = self.options['defaultContractType'] # self_week, next_week, quarter
method += 'Kline'
if limit is not None:
if self.options['warnOnFetchOHLCVLimitArgument']:
                raise ExchangeError(self.id + ' fetchOHLCV counts "limit" candles from current time backwards, therefore the "limit" argument for ' + self.id + ' is disabled. Set ' + self.id + '.options["warnOnFetchOHLCVLimitArgument"] = False to suppress this warning message.')
request['size'] = int(limit) # max is 1440 candles
if since is not None:
request['since'] = since
else:
request['since'] = self.milliseconds() - 86400000 # last 24 hours
response = await getattr(self, method)(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privatePostUserinfo(params)
balances = response['info']['funds']
result = {'info': response}
ids = list(balances['free'].keys())
usedField = 'freezed'
# wtf, okex?
# https://github.com/okcoin-okex/API-docs-OKEx.com/commit/01cf9dd57b1f984a8737ef76a037d4d3795d2ac7
if not(usedField in list(balances.keys())):
usedField = 'holds'
usedKeys = list(balances[usedField].keys())
ids = self.array_concat(ids, usedKeys)
for i in range(0, len(ids)):
id = ids[i]
code = id.upper()
if id in self.currencies_by_id:
code = self.currencies_by_id[id]['code']
else:
code = self.common_currency_code(code)
account = self.account()
account['free'] = self.safe_float(balances['free'], id, 0.0)
account['used'] = self.safe_float(balances[usedField], id, 0.0)
account['total'] = self.sum(account['free'], account['used'])
result[code] = account
return self.parse_balance(result)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = 'privatePost'
order = {
'symbol': market['id'],
'type': side,
}
if market['future']:
method += 'Future'
order = self.extend(order, {
'contract_type': self.options['defaultContractType'], # self_week, next_week, quarter
'match_price': 0, # match best counter party price? 0 or 1, ignores price if 1
'lever_rate': 10, # leverage rate value: 10 or 20(10 by default)
'price': price,
'amount': amount,
})
else:
if type == 'limit':
order['price'] = price
order['amount'] = amount
else:
order['type'] += '_market'
if side == 'buy':
if self.options['marketBuyPrice']:
if price is None:
# eslint-disable-next-line quotes
                            raise ExchangeError(self.id + " market buy orders require a price argument (the amount you want to spend or the cost of the order) when self.options['marketBuyPrice'] is True.")
order['price'] = price
else:
order['price'] = self.safe_float(params, 'cost')
if not order['price']:
# eslint-disable-next-line quotes
                            raise ExchangeError(self.id + " market buy orders require an additional cost parameter, cost = price * amount. If you want to pass the cost of the market order (the amount you want to spend) in the price argument (the default " + self.id + " behaviour), set self.options['marketBuyPrice'] = True. It will effectively suppress this warning exception as well.")
else:
order['amount'] = amount
params = self.omit(params, 'cost')
method += 'Trade'
response = await getattr(self, method)(self.extend(order, params))
timestamp = self.milliseconds()
return {
'info': response,
'id': str(response['order_id']),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'filled': None,
'remaining': None,
'cost': None,
'trades': None,
'fee': None,
}
async def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'order_id': id,
}
method = 'privatePost'
if market['future']:
method += 'FutureCancel'
request['contract_type'] = self.options['defaultContractType'] # self_week, next_week, quarter
else:
method += 'CancelOrder'
response = await getattr(self, method)(self.extend(request, params))
return response
def parse_order_status(self, status):
statuses = {
'-1': 'canceled',
'0': 'open',
'1': 'open',
'2': 'closed',
'3': 'open',
'4': 'canceled',
}
return self.safe_value(statuses, status, status)
def parse_order_side(self, side):
if side == 1:
return 'buy' # open long position
if side == 2:
return 'sell' # open short position
if side == 3:
return 'sell' # liquidate long position
if side == 4:
return 'buy' # liquidate short position
return side
def parse_order(self, order, market=None):
side = None
type = None
if 'type' in order:
if (order['type'] == 'buy') or (order['type'] == 'sell'):
side = order['type']
type = 'limit'
elif order['type'] == 'buy_market':
side = 'buy'
type = 'market'
elif order['type'] == 'sell_market':
side = 'sell'
type = 'market'
else:
side = self.parse_order_side(order['type'])
if ('contract_name' in list(order.keys())) or ('lever_rate' in list(order.keys())):
type = 'margin'
status = self.parse_order_status(self.safe_string(order, 'status'))
symbol = None
if market is None:
marketId = self.safe_string(order, 'symbol')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market:
symbol = market['symbol']
timestamp = None
createDateField = self.get_create_date_field()
if createDateField in order:
timestamp = order[createDateField]
amount = self.safe_float(order, 'amount')
filled = self.safe_float(order, 'deal_amount')
amount = max(amount, filled)
remaining = max(0, amount - filled)
if type == 'market':
remaining = 0
average = self.safe_float(order, 'avg_price')
# https://github.com/ccxt/ccxt/issues/2452
average = self.safe_float(order, 'price_avg', average)
cost = average * filled
result = {
'info': order,
'id': str(order['order_id']),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': order['price'],
'average': average,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
}
return result
def get_create_date_field(self):
# needed for derived exchanges
# allcoin typo create_data instead of create_date
return 'create_date'
def get_orders_field(self):
# needed for derived exchanges
        # allcoin typo: order instead of orders (expected based on their API docs)
return 'orders'
async def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchOrder requires a symbol parameter')
await self.load_markets()
market = self.market(symbol)
method = 'privatePost'
request = {
'order_id': id,
'symbol': market['id'],
# 'status': 0, # 0 for unfilled orders, 1 for filled orders
# 'current_page': 1, # current page number
# 'page_length': 200, # number of orders returned per page, maximum 200
}
if market['future']:
method += 'Future'
request['contract_type'] = self.options['defaultContractType'] # self_week, next_week, quarter
method += 'OrderInfo'
response = await getattr(self, method)(self.extend(request, params))
ordersField = self.get_orders_field()
numOrders = len(response[ordersField])
if numOrders > 0:
return self.parse_order(response[ordersField][0])
raise OrderNotFound(self.id + ' order ' + id + ' not found')
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ExchangeError(self.id + ' fetchOrders requires a symbol parameter')
await self.load_markets()
market = self.market(symbol)
method = 'privatePost'
request = {
'symbol': market['id'],
}
order_id_in_params = ('order_id' in list(params.keys()))
if market['future']:
method += 'FutureOrdersInfo'
request['contract_type'] = self.options['defaultContractType'] # self_week, next_week, quarter
if not order_id_in_params:
                raise ExchangeError(self.id + ' fetchOrders() requires order_id param for futures market ' + symbol + ' (a string of one or more order ids, comma-separated)')
else:
status = None
if 'type' in params:
status = params['type']
elif 'status' in params:
status = params['status']
else:
name = 'type' if order_id_in_params else 'status'
                raise ExchangeError(self.id + ' fetchOrders() requires ' + name + ' param for spot market ' + symbol + ' (0 - for unfilled orders, 1 - for filled/canceled orders)')
if order_id_in_params:
method += 'OrdersInfo'
request = self.extend(request, {
'type': status,
'order_id': params['order_id'],
})
else:
method += 'OrderHistory'
request = self.extend(request, {
'status': status,
'current_page': 1, # current page number
'page_length': 200, # number of orders returned per page, maximum 200
})
params = self.omit(params, ['type', 'status'])
response = await getattr(self, method)(self.extend(request, params))
ordersField = self.get_orders_field()
return self.parse_orders(response[ordersField], market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
open = 0 # 0 for unfilled orders, 1 for filled orders
return await self.fetch_orders(symbol, since, limit, self.extend({
'status': open,
}, params))
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
closed = 1 # 0 for unfilled orders, 1 for filled orders
orders = await self.fetch_orders(symbol, since, limit, self.extend({
'status': closed,
}, params))
return orders
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
# if amount < 0.01:
# raise ExchangeError(self.id + ' withdraw() requires amount > 0.01')
        # for some reason they require supplying a pair of currencies to withdraw a single currency
currencyId = currency['id'] + '_usd'
if tag:
address = address + ':' + tag
request = {
'symbol': currencyId,
'withdraw_address': address,
'withdraw_amount': amount,
'target': 'address', # or 'okcn', 'okcom', 'okex'
}
query = params
if 'chargefee' in query:
request['chargefee'] = query['chargefee']
query = self.omit(query, 'chargefee')
else:
raise ExchangeError(self.id + ' withdraw() requires a `chargefee` parameter')
if self.password:
request['trade_pwd'] = self.password
elif 'password' in query:
request['trade_pwd'] = query['password']
query = self.omit(query, 'password')
elif 'trade_pwd' in query:
request['trade_pwd'] = query['trade_pwd']
query = self.omit(query, 'trade_pwd')
passwordInRequest = ('trade_pwd' in list(request.keys()))
if not passwordInRequest:
raise ExchangeError(self.id + ' withdraw() requires self.password set on the exchange instance or a password / trade_pwd parameter')
response = await self.privatePostWithdraw(self.extend(request, query))
return {
'info': response,
'id': self.safe_string(response, 'withdraw_id'),
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/'
if api != 'web':
url += self.version + '/'
url += path
if api != 'web':
url += self.extension
if api == 'private':
self.check_required_credentials()
query = self.keysort(self.extend({
'api_key': self.apiKey,
}, params))
# secret key must be at the end of query
queryString = self.rawencode(query) + '&secret_key=' + self.secret
query['sign'] = self.hash(self.encode(queryString)).upper()
body = self.urlencode(query)
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
else:
if params:
url += '?' + self.urlencode(params)
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response):
if len(body) < 2:
return # fallback to default error handler
if body[0] == '{':
if 'error_code' in response:
error = self.safe_string(response, 'error_code')
message = self.id + ' ' + self.json(response)
if error in self.exceptions:
ExceptionClass = self.exceptions[error]
raise ExceptionClass(message)
else:
raise ExchangeError(message)
if 'result' in response:
if not response['result']:
raise ExchangeError(self.id + ' ' + self.json(response))
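# A standalone sketch of the request-signing scheme implemented in sign() above,
# assuming ccxt's default MD5 digest for self.hash() (okex_v1_sign is a
# hypothetical helper, not part of the exchange class):
import hashlib

def okex_v1_sign(params, api_key, secret):
    # keysort: order the parameters (including api_key) by key
    query = dict(sorted({**params, 'api_key': api_key}.items()))
    # rawencode: plain key=value pairs joined by '&', without URL-escaping
    query_string = '&'.join('%s=%s' % (k, v) for k, v in query.items())
    # the secret key must be appended at the end of the query string
    query_string += '&secret_key=' + secret
    return hashlib.md5(query_string.encode()).hexdigest().upper()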
|
'''
Description:
Shuffle a set of numbers without duplicates.
Example:
// Init an array with set 1, 2, and 3.
int[] nums = {1,2,3};
Solution solution = new Solution(nums);
// Shuffle the array [1,2,3] and return its result. Any permutation of [1,2,3] must be equally likely to be returned.
solution.shuffle();
// Resets the array back to its original configuration [1,2,3].
solution.reset();
// Returns the random shuffling of array [1,2,3].
solution.shuffle();
Hint #1
The solution expects that we always use the original array for shuffle(), otherwise some of the test cases fail. (Credits: @snehasingh31)
'''
from typing import List
from random import shuffle
class Solution:
def __init__(self, nums: List[int]):
# copy to class member: self.array
self.array = [ *nums ]
self.origin = nums
def reset(self) -> List[int]:
"""
Resets the array to its original configuration and return it.
"""
self.array = [ *(self.origin) ]
return self.array
def shuffle(self) -> List[int]:
"""
Returns a random shuffling of the array.
"""
shuffle( self.array )
return self.array
# n : the length of input array
## Time Complexity: O( n )
#
# The overhead in time is the cost of shuffle and reset, which are of O( n ).
## Space Complexity: O( n )
#
# The overhead in space is the storage for class member, self.array, which is of O( n ).
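# For reference, a minimal sketch of the Fisher-Yates algorithm that
# random.shuffle implements internally (fisher_yates is a hypothetical helper,
# not used by the Solution class above):
from random import randint

def fisher_yates(seq: List[int]) -> List[int]:
    result = list(seq)
    # walk backwards, swapping each position with a uniformly chosen
    # position at or before it
    for i in range(len(result) - 1, 0, -1):
        j = randint(0, i)
        result[i], result[j] = result[j], result[i]
    return result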
from collections import namedtuple
TestEntry = namedtuple('TestEntry', 'sequence')
def test_bench():
t = TestEntry( sequence = [1,2,3] )
# reference output
    # this is a challenge about randomness, so any valid permutation of the shuffling result is accepted.
'''
[1, 3, 2]
[1, 2, 3]
[3, 2, 1]
'''
obj = Solution( t.sequence )
print( obj.shuffle() )
print( obj.reset() )
print( obj.shuffle() )
if __name__ == '__main__':
test_bench()
|
# Copyright (c) 2019-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Please install UMAP before running the code
# use the 'conda install -c conda-forge umap-learn' command to install it
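# A minimal usage sketch (not part of the test suite below; assumes a working
# cuML/GPU environment and that the default n_components of 2 applies):
#
#     from cuml.manifold.umap import UMAP as cuUMAP
#     from sklearn.datasets import make_blobs
#     X, _ = make_blobs(n_samples=100, n_features=10, random_state=0)
#     embedding = cuUMAP(n_neighbors=5).fit_transform(X)
#     assert embedding.shape == (100, 2)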
import numpy as np
import pytest
import umap
import copy
import cupyx
import scipy.sparse
from cuml.manifold.umap import UMAP as cuUMAP
from cuml.test.utils import array_equal, unit_param, \
quality_param, stress_param
from sklearn.neighbors import NearestNeighbors
import joblib
from cuml.common import logger
from sklearn import datasets
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.manifold import trustworthiness
from sklearn.metrics import adjusted_rand_score
dataset_names = ['iris', 'digits', 'wine', 'blobs']
@pytest.mark.parametrize('nrows', [unit_param(500), quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('n_feats', [unit_param(20), quality_param(100),
stress_param(1000)])
def test_blobs_cluster(nrows, n_feats):
data, labels = datasets.make_blobs(
n_samples=nrows, n_features=n_feats, centers=5, random_state=0)
embedding = cuUMAP().fit_transform(data, convert_dtype=True)
if nrows < 500000:
score = adjusted_rand_score(labels,
KMeans(5).fit_predict(embedding))
assert score == 1.0
@pytest.mark.parametrize('nrows', [unit_param(500), quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('n_feats', [unit_param(10), quality_param(100),
stress_param(1000)])
def test_umap_fit_transform_score(nrows, n_feats):
n_samples = nrows
n_features = n_feats
data, labels = make_blobs(n_samples=n_samples, n_features=n_features,
centers=10, random_state=42)
model = umap.UMAP(n_neighbors=10, min_dist=0.1)
cuml_model = cuUMAP(n_neighbors=10, min_dist=0.01)
embedding = model.fit_transform(data)
cuml_embedding = cuml_model.fit_transform(data, convert_dtype=True)
assert not np.isnan(embedding).any()
assert not np.isnan(cuml_embedding).any()
if nrows < 500000:
cuml_score = adjusted_rand_score(labels,
KMeans(10).fit_predict(
cuml_embedding))
score = adjusted_rand_score(labels,
KMeans(10).fit_predict(embedding))
assert array_equal(score, cuml_score, 1e-2, with_sign=True)
def test_supervised_umap_trustworthiness_on_iris():
iris = datasets.load_iris()
data = iris.data
embedding = cuUMAP(n_neighbors=10, random_state=0,
min_dist=0.01).fit_transform(
data, iris.target, convert_dtype=True)
trust = trustworthiness(iris.data, embedding, n_neighbors=10)
assert trust >= 0.97
def test_semisupervised_umap_trustworthiness_on_iris():
iris = datasets.load_iris()
data = iris.data
target = iris.target.copy()
target[25:75] = -1
embedding = cuUMAP(n_neighbors=10, random_state=0,
min_dist=0.01).fit_transform(
data, target, convert_dtype=True)
trust = trustworthiness(iris.data, embedding, n_neighbors=10)
assert trust >= 0.97
def test_umap_trustworthiness_on_iris():
iris = datasets.load_iris()
data = iris.data
embedding = cuUMAP(n_neighbors=10, min_dist=0.01,
random_state=0).fit_transform(
data, convert_dtype=True)
trust = trustworthiness(iris.data, embedding, n_neighbors=10)
assert trust >= 0.97
@pytest.mark.parametrize('target_metric', ["categorical", "euclidean"])
def test_umap_transform_on_iris(target_metric):
iris = datasets.load_iris()
iris_selection = np.random.RandomState(42).choice(
[True, False], 150, replace=True, p=[0.75, 0.25])
data = iris.data[iris_selection]
fitter = cuUMAP(n_neighbors=10, init="random", n_epochs=800, min_dist=0.01,
random_state=42, target_metric=target_metric)
fitter.fit(data, convert_dtype=True)
new_data = iris.data[~iris_selection]
embedding = fitter.transform(new_data, convert_dtype=True)
assert not np.isnan(embedding).any()
trust = trustworthiness(new_data, embedding, n_neighbors=10)
assert trust >= 0.85
@pytest.mark.parametrize('input_type', ['cupy', 'scipy'])
@pytest.mark.parametrize('xform_method', ['fit', 'fit_transform'])
@pytest.mark.parametrize('target_metric', ["categorical", "euclidean"])
def test_umap_transform_on_digits_sparse(target_metric, input_type,
xform_method):
digits = datasets.load_digits()
digits_selection = np.random.RandomState(42).choice(
[True, False], 1797, replace=True, p=[0.75, 0.25])
if input_type == 'cupy':
sp_prefix = cupyx.scipy.sparse
else:
sp_prefix = scipy.sparse
data = sp_prefix.csr_matrix(
scipy.sparse.csr_matrix(digits.data[digits_selection]))
fitter = cuUMAP(n_neighbors=15,
verbose=logger.level_info,
init="random",
n_epochs=0,
min_dist=0.01,
random_state=42,
target_metric=target_metric)
new_data = sp_prefix.csr_matrix(
scipy.sparse.csr_matrix(digits.data[~digits_selection]))
if xform_method == 'fit':
fitter.fit(data, convert_dtype=True)
embedding = fitter.transform(new_data, convert_dtype=True)
else:
embedding = fitter.fit_transform(new_data, convert_dtype=True)
if input_type == 'cupy':
embedding = embedding.get()
trust = trustworthiness(digits.data[~digits_selection], embedding,
n_neighbors=15)
assert trust >= 0.96
@pytest.mark.parametrize('target_metric', ["categorical", "euclidean"])
def test_umap_transform_on_digits(target_metric):
digits = datasets.load_digits()
digits_selection = np.random.RandomState(42).choice(
[True, False], 1797, replace=True, p=[0.75, 0.25])
data = digits.data[digits_selection]
fitter = cuUMAP(n_neighbors=15,
verbose=logger.level_debug,
init="random",
n_epochs=0,
min_dist=0.01,
random_state=42,
target_metric=target_metric)
fitter.fit(data, convert_dtype=True)
new_data = digits.data[~digits_selection]
embedding = fitter.transform(new_data, convert_dtype=True)
trust = trustworthiness(digits.data[~digits_selection], embedding,
n_neighbors=15)
assert trust >= 0.96
@pytest.mark.parametrize('target_metric', ["categorical", "euclidean"])
@pytest.mark.parametrize('name', dataset_names)
def test_umap_fit_transform_trust(name, target_metric):
if name == 'iris':
iris = datasets.load_iris()
data = iris.data
labels = iris.target
elif name == 'digits':
digits = datasets.load_digits(n_class=5)
data = digits.data
labels = digits.target
elif name == 'wine':
wine = datasets.load_wine()
data = wine.data
labels = wine.target
else:
data, labels = make_blobs(n_samples=500, n_features=10,
centers=10, random_state=42)
model = umap.UMAP(n_neighbors=10, min_dist=0.01,
target_metric=target_metric)
cuml_model = cuUMAP(n_neighbors=10, min_dist=0.01,
target_metric=target_metric)
embedding = model.fit_transform(data)
cuml_embedding = cuml_model.fit_transform(data, convert_dtype=True)
trust = trustworthiness(data, embedding, n_neighbors=10)
cuml_trust = trustworthiness(data, cuml_embedding, n_neighbors=10)
assert array_equal(trust, cuml_trust, 1e-1, with_sign=True)
@pytest.mark.parametrize('target_metric', ["categorical", "euclidean"])
@pytest.mark.parametrize('name', [unit_param('digits')])
@pytest.mark.parametrize('nrows', [quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('n_feats', [quality_param(100),
stress_param(1000)])
@pytest.mark.parametrize('should_downcast', [True])
@pytest.mark.parametrize('input_type', ['dataframe', 'ndarray'])
def test_umap_data_formats(input_type, should_downcast,
nrows, n_feats, name, target_metric):
dtype = np.float32 if not should_downcast else np.float64
    n_samples = nrows
if name == 'digits':
# use the digits dataset for unit test
digits = datasets.load_digits(n_class=9)
X = digits["data"].astype(dtype)
else:
X, y = datasets.make_blobs(n_samples=n_samples,
n_features=n_feats, random_state=0)
umap = cuUMAP(n_neighbors=3, n_components=2, target_metric=target_metric)
embeds = umap.fit_transform(X)
assert type(embeds) == np.ndarray
@pytest.mark.parametrize('target_metric', ["categorical", "euclidean"])
@pytest.mark.filterwarnings("ignore:(.*)connected(.*):UserWarning:sklearn[.*]")
def test_umap_fit_transform_score_default(target_metric):
n_samples = 500
n_features = 20
data, labels = make_blobs(n_samples=n_samples, n_features=n_features,
centers=10, random_state=42)
model = umap.UMAP(target_metric=target_metric)
cuml_model = cuUMAP(target_metric=target_metric)
embedding = model.fit_transform(data)
cuml_embedding = cuml_model.fit_transform(data, convert_dtype=True)
cuml_score = adjusted_rand_score(labels,
KMeans(10).fit_predict(
cuml_embedding))
score = adjusted_rand_score(labels,
KMeans(10).fit_predict(embedding))
assert array_equal(score, cuml_score, 1e-2, with_sign=True)
def test_umap_fit_transform_against_fit_and_transform():
n_samples = 500
n_features = 20
data, labels = make_blobs(n_samples=n_samples, n_features=n_features,
centers=10, random_state=42)
"""
    First, test that the default option does not hash the input
"""
cuml_model = cuUMAP()
ft_embedding = cuml_model.fit_transform(data, convert_dtype=True)
fit_embedding_same_input = cuml_model.transform(data, convert_dtype=True)
assert joblib.hash(ft_embedding) != joblib.hash(fit_embedding_same_input)
"""
    Next, test that explicitly enabling the feature hashes the input
"""
cuml_model = cuUMAP(hash_input=True)
ft_embedding = cuml_model.fit_transform(data, convert_dtype=True)
fit_embedding_same_input = cuml_model.transform(data, convert_dtype=True)
assert joblib.hash(ft_embedding) == joblib.hash(fit_embedding_same_input)
fit_embedding_diff_input = cuml_model.transform(data[1:],
convert_dtype=True)
assert joblib.hash(ft_embedding) != joblib.hash(fit_embedding_diff_input)
@pytest.mark.parametrize('n_components,random_state',
[unit_param(2, None),
unit_param(2, 8),
unit_param(2, np.random.RandomState(42)),
unit_param(21, None),
unit_param(21, np.random.RandomState(42)),
unit_param(25, 8),
unit_param(50, None),
stress_param(50, 8)])
def test_umap_fit_transform_reproducibility(n_components, random_state):
n_samples = 8000
n_features = 200
if random_state is None:
n_components *= 2
data, labels = make_blobs(n_samples=n_samples, n_features=n_features,
centers=10, random_state=42)
def get_embedding(n_components, random_state):
reducer = cuUMAP(init="random",
n_components=n_components,
random_state=random_state)
return reducer.fit_transform(data, convert_dtype=True)
state = copy.copy(random_state)
cuml_embedding1 = get_embedding(n_components, state)
state = copy.copy(random_state)
cuml_embedding2 = get_embedding(n_components, state)
assert not np.isnan(cuml_embedding1).any()
assert not np.isnan(cuml_embedding2).any()
# Reproducibility threshold raised until intermittent failure is fixed
# Ref: https://github.com/rapidsai/cuml/issues/1903
mean_diff = np.mean(np.abs(cuml_embedding1 - cuml_embedding2))
if random_state is not None:
assert mean_diff == 0.0
else:
assert mean_diff > 0.5
@pytest.mark.parametrize('n_components,random_state',
[unit_param(2, None),
unit_param(2, 8),
unit_param(2, np.random.RandomState(42)),
unit_param(21, None),
unit_param(25, 8),
unit_param(25, np.random.RandomState(42)),
unit_param(50, None),
stress_param(50, 8)])
def test_umap_transform_reproducibility(n_components, random_state):
n_samples = 5000
n_features = 200
if random_state is None:
n_components *= 2
data, labels = make_blobs(n_samples=n_samples, n_features=n_features,
centers=10, random_state=42)
selection = np.random.RandomState(42).choice(
[True, False], n_samples, replace=True, p=[0.5, 0.5])
fit_data = data[selection]
transform_data = data[~selection]
def get_embedding(n_components, random_state):
reducer = cuUMAP(init="random",
n_components=n_components,
random_state=random_state)
reducer.fit(fit_data, convert_dtype=True)
return reducer.transform(transform_data, convert_dtype=True)
state = copy.copy(random_state)
cuml_embedding1 = get_embedding(n_components, state)
state = copy.copy(random_state)
cuml_embedding2 = get_embedding(n_components, state)
assert not np.isnan(cuml_embedding1).any()
assert not np.isnan(cuml_embedding2).any()
# Reproducibility threshold raised until intermittent failure is fixed
# Ref: https://github.com/rapidsai/cuml/issues/1903
mean_diff = np.mean(np.abs(cuml_embedding1 - cuml_embedding2))
if random_state is not None:
assert mean_diff == 0.0
else:
assert mean_diff > 0.5
def test_umap_fit_transform_trustworthiness_with_consistency_enabled():
iris = datasets.load_iris()
data = iris.data
algo = cuUMAP(n_neighbors=10, min_dist=0.01, init="random",
random_state=42)
embedding = algo.fit_transform(data, convert_dtype=True)
trust = trustworthiness(iris.data, embedding, n_neighbors=10)
assert trust >= 0.97
def test_umap_transform_trustworthiness_with_consistency_enabled():
iris = datasets.load_iris()
data = iris.data
selection = np.random.RandomState(42).choice(
[True, False], data.shape[0], replace=True, p=[0.5, 0.5])
fit_data = data[selection]
transform_data = data[~selection]
model = cuUMAP(n_neighbors=10, min_dist=0.01, init="random",
random_state=42)
model.fit(fit_data, convert_dtype=True)
embedding = model.transform(transform_data, convert_dtype=True)
trust = trustworthiness(transform_data, embedding, n_neighbors=10)
assert trust >= 0.92
@pytest.mark.filterwarnings("ignore:(.*)zero(.*)::scipy[.*]|umap[.*]")
def test_exp_decay_params():
def compare_exp_decay_params(a=None, b=None, min_dist=0.1, spread=1.0):
cuml_model = cuUMAP(a=a, b=b, min_dist=min_dist, spread=spread)
state = cuml_model.__getstate__()
cuml_a, cuml_b = state['a'], state['b']
skl_model = umap.UMAP(a=a, b=b, min_dist=min_dist, spread=spread)
skl_model.fit(np.zeros((1, 1)))
sklearn_a, sklearn_b = skl_model._a, skl_model._b
        assert abs(cuml_a - sklearn_a) < 1e-6
        assert abs(cuml_b - sklearn_b) < 1e-6
compare_exp_decay_params(min_dist=0.1, spread=1.0)
compare_exp_decay_params(a=0.5, b=2.0)
compare_exp_decay_params(a=0.5)
compare_exp_decay_params(b=0.5)
compare_exp_decay_params(min_dist=0.1, spread=10.0)
@pytest.mark.parametrize('n_neighbors', [5, 15])
def test_umap_knn_parameters(n_neighbors):
data, labels = datasets.make_blobs(
n_samples=2000, n_features=10, centers=5, random_state=0)
data = data.astype(np.float32)
def fit_transform_embed(knn_graph=None):
model = cuUMAP(random_state=42,
init='random',
n_neighbors=n_neighbors)
return model.fit_transform(data, knn_graph=knn_graph,
convert_dtype=True)
def transform_embed(knn_graph=None):
model = cuUMAP(random_state=42,
init='random',
n_neighbors=n_neighbors)
model.fit(data, knn_graph=knn_graph, convert_dtype=True)
return model.transform(data, knn_graph=knn_graph,
convert_dtype=True)
def test_trustworthiness(embedding):
trust = trustworthiness(data, embedding, n_neighbors=n_neighbors)
assert trust >= 0.92
def test_equality(e1, e2):
mean_diff = np.mean(np.abs(e1 - e2))
print("mean diff: %s" % mean_diff)
assert mean_diff < 1.0
neigh = NearestNeighbors(n_neighbors=n_neighbors)
neigh.fit(data)
knn_graph = neigh.kneighbors_graph(data, mode="distance")
embedding1 = fit_transform_embed(None)
embedding2 = fit_transform_embed(knn_graph.tocsr())
embedding3 = fit_transform_embed(knn_graph.tocoo())
embedding4 = fit_transform_embed(knn_graph.tocsc())
embedding5 = transform_embed(knn_graph.tocsr())
embedding6 = transform_embed(knn_graph.tocoo())
embedding7 = transform_embed(knn_graph.tocsc())
test_trustworthiness(embedding1)
test_trustworthiness(embedding2)
test_trustworthiness(embedding3)
test_trustworthiness(embedding4)
test_trustworthiness(embedding5)
test_trustworthiness(embedding6)
test_trustworthiness(embedding7)
test_equality(embedding2, embedding3)
test_equality(embedding3, embedding4)
test_equality(embedding5, embedding6)
test_equality(embedding6, embedding7)
|
# Given a binary tree, find its maximum depth.
# The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.
# Definition for a binary tree node
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
# @param root, a tree node
# @return an integer
def maxDepth(self, root):
if root is None:
return 0
        L = 0
        R = 0
if not (root.left is None):
L = self.maxDepth(root.left)
if not (root.right is None):
R = self.maxDepth(root.right)
return (L + 1) if L > R else (R + 1)
# if __name__ == "__main__":
#     root = TreeNode(1)
#     root.left = TreeNode(1)
#     root.right = TreeNode(1)
#     s = Solution()
#     print(s.maxDepth(root))
#     print("hello")
|
import os
def shell():
    # loop instead of recursing, so long sessions cannot hit the recursion limit
    while True:
        inputuser = input("> ")
        if inputuser == "exit":
            break
        os.system(inputuser)
print("Welcome to duckshell (python alpha)")
print("System info: ")
os.system("echo distro: $(uname -o)");
os.system("echo hostname: $(uname -n)");
os.system("echo architecture: $(uname -m)");
os.system("echo linux kernel: $(uname -r)");
os.system("echo python version: $(python --version)")
shell()
|
import logging
import subprocess
import click
try:
from instance import settings
APP_ROOT = settings.APP_ROOT
except ImportError:
logging.error('Ensure __init__.py and settings.py both exist in instance/')
exit(1)
except AttributeError:
from config import settings
APP_ROOT = settings.APP_ROOT
PACKAGE_PATH = '{0}/{1}'.format(APP_ROOT, 'catwatch')
@click.command()
@click.argument('path', default=PACKAGE_PATH)
def cli(path):
"""
Run test coverage report.
:return: Subprocess call result
"""
cmd = 'py.test --cov-report term-missing --cov {0} {0}'.format(path)
return subprocess.call(cmd, shell=True)
|
import os
import time
from multiprocessing import Process
from typing import Tuple
import flwr as fl
import numpy as np
import tensorflow as tf
from flwr.server.strategy import FedAvg
import dataset
# generate random integer values
from random import seed
from random import randint
# Make TensorFlow log less verbose
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# K: Prevent TF from using GPU (not enough memory)
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
DATASET = Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]
def start_server(num_rounds: int, num_clients: int, fraction_fit: float):
"""Start the server with a slightly adjusted FedAvg strategy."""
strategy = FedAvg(min_available_clients=num_clients, fraction_fit=fraction_fit)
# Exposes the server by default on port 8080
fl.server.start_server(strategy=strategy, config={"num_rounds": num_rounds})
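# Note: FedAvg accepts further knobs; a hypothetical variant that also pins the
# number of clients sampled for training in each round (not used here):
#
#     strategy = FedAvg(min_available_clients=num_clients,
#                       min_fit_clients=max(1, int(num_clients * fraction_fit)),
#                       fraction_fit=fraction_fit)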
def start_client(dataset: DATASET) -> None:
"""Start a single client with the provided dataset."""
# Load and compile a Keras model for CIFAR-10
#model = tf.keras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None)
model = tf.keras.Sequential(
[
tf.keras.Input(shape=(32, 32, 3)),
tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation="softmax"),
]
)
model.compile("adam", "sparse_categorical_crossentropy", metrics=[tf.keras.metrics.CategoricalAccuracy(), tf.keras.metrics.MeanSquaredError()])
### @TODO: check if "accuracy" and tf.keras.metrics.CategoricalAccuracy() return the same results
# Unpack the CIFAR-10 dataset partition
(x_train, y_train), (x_test, y_test) = dataset
# Define a Flower client
class CifarClient(fl.client.NumPyClient):
def get_parameters(self):
"""Return current weights."""
return model.get_weights()
def fit(self, parameters, config):
"""Fit model and return new weights as well as number of training
examples."""
model.set_weights(parameters)
# Remove steps_per_epoch if you want to train over the full dataset
# https://keras.io/api/models/model_training_apis/#fit-method
#nap_time = randint (0, 5)
#time.sleep (nap_time)
#print ("Slept for", nap_time, "seconds.")
model.fit(x_train, y_train, epochs=10, batch_size=256, steps_per_epoch=10)
return model.get_weights(), len(x_train), {}
def evaluate(self, parameters, config):
"""Evaluate using provided parameters."""
model.set_weights(parameters)
loss, accuracy, mse = model.evaluate(x_test, y_test)
print ('"Loss:', loss, ". Accuracy:", accuracy, ". MSE:", mse, ".")
return loss, len(x_test), {"accuracy": accuracy}
# Start Flower client
fl.client.start_numpy_client("0.0.0.0:8080", client=CifarClient())
def run_simulation(num_rounds: int, num_clients: int, fraction_fit: float):
"""Start a FL simulation."""
# This will hold all the processes which we are going to create
processes = []
# Start the server
server_process = Process(
target=start_server, args=(num_rounds, num_clients, fraction_fit)
)
server_process.start()
processes.append(server_process)
# Optionally block the script here for a second or two so the server has time to start
time.sleep(2)
# Load the dataset partitions
partitions = dataset.load(num_partitions=num_clients)
# Start all the clients
for partition in partitions:
client_process = Process(target=start_client, args=(partition,))
client_process.start()
processes.append(client_process)
# Block until all processes are finished
for p in processes:
p.join()
if __name__ == "__main__":
run_simulation(num_rounds=100, num_clients=5, fraction_fit=0.5)
|
#!/usr/bin/env python3
elements = (1, 2, 4)
bitset = 0 # 1. Initialize bitset
# Add elements to bitset
for i in elements:
bitset = bitset | 1<<i # 2. Add element to bitset
# Print contents of bitset
print(bitset)
# Test for elements in bitset
for i in range(6):
if bitset & 1<<i: # 3. Test if element is in bitset
print(i)
# Remove elements from bitset
for i in elements:
bitset = bitset & ~(1<<i) # 4. Remove element from bitset
# Print contents of bitset
print(bitset)
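# The same four operations wrapped in a small helper class (a sketch mirroring
# the numbered steps above):
class BitSet:
    def __init__(self):
        self.bits = 0                        # 1. initialize an empty bitset

    def add(self, i):
        self.bits |= 1 << i                  # 2. set bit i

    def __contains__(self, i):
        return bool(self.bits & (1 << i))    # 3. test bit i

    def remove(self, i):
        self.bits &= ~(1 << i)               # 4. clear bit i

s = BitSet()
for i in elements:
    s.add(i)
print(2 in s, 3 in s)  # True False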
|
'''
The management of salt command line utilities are stored in here
'''
# Import python libs
import os
import sys
# Import salt components
import salt.cli.caller
import salt.cli.cp
import salt.cli.key
import salt.cli.batch
import salt.client
import salt.output
import salt.runner
import optparse
from salt.utils import parsers
from salt.utils.verify import verify_env
from salt.version import __version__ as VERSION
from salt.exceptions import SaltInvocationError, SaltClientError, SaltException
class SaltCMD(parsers.SaltCMDOptionParser):
'''
The execution of a salt command happens here
'''
def run(self):
'''
Execute the salt command line
'''
self.parse_args()
try:
local = salt.client.LocalClient(self.get_config_file_path('master'))
except SaltClientError as exc:
self.exit(2, '{0}\n'.format(exc))
return
if self.options.query:
ret = local.find_cmd(self.config['cmd'])
for jid in ret:
if isinstance(ret, list) or isinstance(ret, dict):
print('Return data for job {0}:'.format(jid))
salt.output.display_output(ret[jid], None, self.config)
print('')
elif self.options.batch:
batch = salt.cli.batch.Batch(self.config)
batch.run()
else:
if self.options.timeout <= 0:
self.options.timeout = local.opts['timeout']
args = [
self.config['tgt'],
self.config['fun'],
self.config['arg'],
self.options.timeout,
]
if self.selected_target_option:
args.append(self.selected_target_option)
else:
args.append('glob')
if getattr(self.options, 'return'):
args.append(getattr(self.options, 'return'))
else:
args.append('')
try:
# local will be None when there was an error
if local:
if self.options.static:
if self.options.verbose:
args.append(True)
full_ret = local.cmd_full_return(*args)
ret, out = self._format_ret(full_ret)
self._output_ret(ret, out)
elif self.config['fun'] == 'sys.doc':
ret = {}
out = ''
for full_ret in local.cmd_cli(*args):
ret_, out = self._format_ret(full_ret)
ret.update(ret_)
self._output_ret(ret, out)
else:
if self.options.verbose:
args.append(True)
for full_ret in local.cmd_cli(*args):
ret, out = self._format_ret(full_ret)
self._output_ret(ret, out)
except SaltInvocationError as exc:
ret = exc
out = ''
def _output_ret(self, ret, out):
'''
Print the output from a single return to the terminal
'''
# Handle special case commands
if self.config['fun'] == 'sys.doc':
self._print_docs(ret)
else:
# Determine the proper output method and run it
salt.output.display_output(ret, out, self.config)
def _format_ret(self, full_ret):
'''
Take the full return data and format it to simple output
'''
ret = {}
out = ''
for key, data in full_ret.items():
ret[key] = data['ret']
if 'out' in data:
out = data['out']
return ret, out
def _print_docs(self, ret):
'''
Print out the docstrings for all of the functions on the minions
'''
docs = {}
if not ret:
self.exit(2, 'No minions found to gather docs from\n')
for host in ret:
for fun in ret[host]:
if fun not in docs:
if ret[host][fun]:
docs[fun] = ret[host][fun]
for fun in sorted(docs):
print(fun + ':')
print(docs[fun])
print('')
class SaltCP(parsers.SaltCPOptionParser):
'''
Run the salt-cp command line client
'''
def run(self):
'''
Execute salt-cp
'''
self.parse_args()
cp_ = salt.cli.cp.SaltCP(self.config)
cp_.run()
class SaltKey(parsers.SaltKeyOptionParser):
'''
Initialize the Salt key manager
'''
def run(self):
'''
Execute salt-key
'''
self.parse_args()
if self.config['verify_env']:
verify_env([
os.path.join(self.config['pki_dir'], 'minions'),
os.path.join(self.config['pki_dir'], 'minions_pre'),
os.path.join(self.config['pki_dir'], 'minions_rejected'),
os.path.dirname(self.config['key_logfile']),
],
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
)
self.setup_logfile_logger()
key = salt.cli.key.Key(self.config)
key.run()
class SaltCall(parsers.SaltCallOptionParser):
'''
Used to locally execute a salt command
'''
def run(self):
'''
Execute the salt call!
'''
self.parse_args()
if self.config['verify_env']:
verify_env([
self.config['pki_dir'],
self.config['cachedir'],
os.path.dirname(self.config['log_file'])
],
self.config['user'],
permissive=self.config['permissive_pki_access'],
pki_dir=self.config['pki_dir'],
)
caller = salt.cli.caller.Caller(self.config)
if self.options.doc:
caller.print_docs()
self.exit(0)
if self.options.grains_run:
caller.print_grains()
self.exit(0)
caller.run()
class SaltRun(parsers.SaltRunOptionParser):
def run(self):
'''
Execute salt-run
'''
self.parse_args()
runner = salt.runner.Runner(self.config)
if self.options.doc:
runner._print_docs()
else:
# Run this here so SystemExit isn't raised anywhere else when
# someone tries to use the runners via the python api
try:
runner.run()
except SaltClientError as exc:
raise SystemExit(str(exc))
|
# # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:hello@blockworks.foundation)
import typing
from decimal import Decimal
from solana.publickey import PublicKey
from .accountinfo import AccountInfo
from .addressableaccount import AddressableAccount
from .cache import Cache, PerpMarketCache, MarketCache
from .constants import SYSTEM_PROGRAM_ADDRESS
from .context import Context
from .instrumentlookup import InstrumentLookup
from .instrumentvalue import InstrumentValue
from .layouts import layouts
from .lotsizeconverter import LotSizeConverter, RaisingLotSizeConverter
from .marketlookup import MarketLookup
from .metadata import Metadata
from .token import Instrument, Token
from .tokenbank import TokenBank
from .version import Version
# # 🥭 GroupSlotSpotMarket class
#
class GroupSlotSpotMarket:
def __init__(self, address: PublicKey, maint_asset_weight: Decimal, init_asset_weight: Decimal, maint_liab_weight: Decimal, init_liab_weight: Decimal) -> None:
self.address: PublicKey = address
self.maint_asset_weight: Decimal = maint_asset_weight
self.init_asset_weight: Decimal = init_asset_weight
self.maint_liab_weight: Decimal = maint_liab_weight
self.init_liab_weight: Decimal = init_liab_weight
@staticmethod
def from_layout(layout: typing.Any) -> "GroupSlotSpotMarket":
spot_market: PublicKey = layout.spot_market
maint_asset_weight: Decimal = round(layout.maint_asset_weight, 8)
init_asset_weight: Decimal = round(layout.init_asset_weight, 8)
maint_liab_weight: Decimal = round(layout.maint_liab_weight, 8)
init_liab_weight: Decimal = round(layout.init_liab_weight, 8)
return GroupSlotSpotMarket(spot_market, maint_asset_weight, init_asset_weight, maint_liab_weight, init_liab_weight)
@staticmethod
def from_layout_or_none(layout: typing.Any) -> typing.Optional["GroupSlotSpotMarket"]:
if (layout.spot_market is None) or (layout.spot_market == SYSTEM_PROGRAM_ADDRESS):
return None
return GroupSlotSpotMarket.from_layout(layout)
def __str__(self) -> str:
return f"""« 𝙶𝚛𝚘𝚞𝚙𝚂𝚕𝚘𝚝𝚂𝚙𝚘𝚝𝙼𝚊𝚛𝚔𝚎𝚝 [{self.address}]
Asset Weights:
Initial: {self.init_asset_weight}
Maintenance: {self.maint_asset_weight}
Liability Weights:
Initial: {self.init_liab_weight}
Maintenance: {self.maint_liab_weight}
»"""
def __repr__(self) -> str:
return f"{self}"
# # 🥭 GroupSlotPerpMarket class
#
class GroupSlotPerpMarket:
def __init__(self, address: PublicKey, maint_asset_weight: Decimal, init_asset_weight: Decimal, maint_liab_weight: Decimal, init_liab_weight: Decimal, liquidation_fee: Decimal, base_lot_size: Decimal, quote_lot_size: Decimal) -> None:
self.address: PublicKey = address
self.maint_asset_weight: Decimal = maint_asset_weight
self.init_asset_weight: Decimal = init_asset_weight
self.maint_liab_weight: Decimal = maint_liab_weight
self.init_liab_weight: Decimal = init_liab_weight
self.liquidation_fee: Decimal = liquidation_fee
self.base_lot_size: Decimal = base_lot_size
self.quote_lot_size: Decimal = quote_lot_size
@staticmethod
def from_layout(layout: typing.Any) -> "GroupSlotPerpMarket":
perp_market: PublicKey = layout.perp_market
maint_asset_weight: Decimal = round(layout.maint_asset_weight, 8)
init_asset_weight: Decimal = round(layout.init_asset_weight, 8)
maint_liab_weight: Decimal = round(layout.maint_liab_weight, 8)
init_liab_weight: Decimal = round(layout.init_liab_weight, 8)
liquidation_fee: Decimal = round(layout.liquidation_fee, 8)
base_lot_size: Decimal = layout.base_lot_size
quote_lot_size: Decimal = layout.quote_lot_size
return GroupSlotPerpMarket(perp_market, maint_asset_weight, init_asset_weight, maint_liab_weight, init_liab_weight, liquidation_fee, base_lot_size, quote_lot_size)
@staticmethod
def from_layout_or_none(layout: typing.Any) -> typing.Optional["GroupSlotPerpMarket"]:
if (layout.perp_market is None) or (layout.perp_market == SYSTEM_PROGRAM_ADDRESS):
return None
return GroupSlotPerpMarket.from_layout(layout)
def __str__(self) -> str:
return f"""« 𝙶𝚛𝚘𝚞𝚙𝚂𝚕𝚘𝚝𝙿𝚎𝚛𝚙𝙼𝚊𝚛𝚔𝚎𝚝 [{self.address}]
Asset Weights:
Initial: {self.init_asset_weight}
Maintenance: {self.maint_asset_weight}
Liability Weights:
Initial: {self.init_liab_weight}
Maintenance: {self.maint_liab_weight}
Liquidation Fee: {self.liquidation_fee}
Base Lot Size: {self.base_lot_size}
Quote Lot Size: {self.quote_lot_size}
»"""
def __repr__(self) -> str:
return f"{self}"
# # 🥭 GroupSlot class
#
# `GroupSlot` gathers indexed slot items together instead of separate arrays.
#
class GroupSlot:
def __init__(self, index: int, base_instrument: Instrument, base_token_bank: typing.Optional[TokenBank], quote_token_bank: TokenBank, spot_market_info: typing.Optional[GroupSlotSpotMarket], perp_market_info: typing.Optional[GroupSlotPerpMarket], perp_lot_size_converter: LotSizeConverter, oracle: PublicKey) -> None:
self.index: int = index
self.base_instrument: Instrument = base_instrument
self.base_token_bank: typing.Optional[TokenBank] = base_token_bank
self.quote_token_bank: TokenBank = quote_token_bank
self.spot_market: typing.Optional[GroupSlotSpotMarket] = spot_market_info
self.perp_market: typing.Optional[GroupSlotPerpMarket] = perp_market_info
self.perp_lot_size_converter: LotSizeConverter = perp_lot_size_converter
self.oracle: PublicKey = oracle
def __str__(self) -> str:
base_token_bank = f"{self.base_token_bank}".replace("\n", "\n ")
quote_token_bank = f"{self.quote_token_bank}".replace("\n", "\n ")
spot_market_info = f"{self.spot_market}".replace("\n", "\n ")
perp_market_info = f"{self.perp_market}".replace("\n", "\n ")
return f"""« 𝙶𝚛𝚘𝚞𝚙𝚂𝚕𝚘𝚝[{self.index}] {self.base_instrument}
Base Token Info:
{base_token_bank}
Quote Token Info:
{quote_token_bank}
Oracle: {self.oracle}
Spot Market:
{spot_market_info}
Perp Market:
{perp_market_info}
»"""
def __repr__(self) -> str:
return f"{self}"
# # 🥭 Group class
#
# `Group` defines root functionality for Mango Markets.
#
class Group(AddressableAccount):
def __init__(self, account_info: AccountInfo, version: Version, name: str,
meta_data: Metadata,
shared_quote: TokenBank,
slot_indices: typing.Sequence[bool],
slots: typing.Sequence[GroupSlot],
signer_nonce: Decimal, signer_key: PublicKey,
admin: PublicKey, serum_program_address: PublicKey, cache: PublicKey, valid_interval: Decimal,
insurance_vault: PublicKey, srm_vault: PublicKey, msrm_vault: PublicKey, fees_vault: PublicKey) -> None:
super().__init__(account_info)
self.version: Version = version
self.name: str = name
self.meta_data: Metadata = meta_data
self.shared_quote: TokenBank = shared_quote
self.slot_indices: typing.Sequence[bool] = slot_indices
self.slots: typing.Sequence[GroupSlot] = slots
self.signer_nonce: Decimal = signer_nonce
self.signer_key: PublicKey = signer_key
self.admin: PublicKey = admin
self.serum_program_address: PublicKey = serum_program_address
self.cache: PublicKey = cache
self.valid_interval: Decimal = valid_interval
self.insurance_vault: PublicKey = insurance_vault
self.srm_vault: PublicKey = srm_vault
self.msrm_vault: PublicKey = msrm_vault
self.fees_vault: PublicKey = fees_vault
@property
def shared_quote_token(self) -> Token:
return Token.ensure(self.shared_quote.token)
@property
def liquidity_incentive_token_bank(self) -> TokenBank:
for token_bank in self.tokens:
if token_bank.token.symbol_matches("MNGO"):
return token_bank
raise Exception(f"Could not find token info for symbol 'MNGO' in group {self.address}")
@property
def liquidity_incentive_token(self) -> Token:
return Token.ensure(self.liquidity_incentive_token_bank.token)
@property
def tokens(self) -> typing.Sequence[TokenBank]:
return [*self.base_tokens, self.shared_quote]
@property
def tokens_by_index(self) -> typing.Sequence[typing.Optional[TokenBank]]:
return [*self.base_tokens_by_index, self.shared_quote]
@property
def slots_by_index(self) -> typing.Sequence[typing.Optional[GroupSlot]]:
mapped_items: typing.List[typing.Optional[GroupSlot]] = []
slot_counter = 0
for available in self.slot_indices:
if available:
mapped_items += [self.slots[slot_counter]]
slot_counter += 1
else:
mapped_items += [None]
return mapped_items
@property
def base_tokens(self) -> typing.Sequence[TokenBank]:
return [slot.base_token_bank for slot in self.slots if slot.base_token_bank is not None]
@property
def base_tokens_by_index(self) -> typing.Sequence[typing.Optional[TokenBank]]:
return [slot.base_token_bank if slot is not None else None for slot in self.slots_by_index]
@property
def oracles(self) -> typing.Sequence[PublicKey]:
return [slot.oracle for slot in self.slots if slot.oracle is not None]
@property
def oracles_by_index(self) -> typing.Sequence[typing.Optional[PublicKey]]:
return [slot.oracle if slot is not None else None for slot in self.slots_by_index]
@property
def spot_markets(self) -> typing.Sequence[GroupSlotSpotMarket]:
return [slot.spot_market for slot in self.slots if slot.spot_market is not None]
@property
def spot_markets_by_index(self) -> typing.Sequence[typing.Optional[GroupSlotSpotMarket]]:
return [slot.spot_market if slot is not None else None for slot in self.slots_by_index]
@property
def perp_markets(self) -> typing.Sequence[GroupSlotPerpMarket]:
return [slot.perp_market for slot in self.slots if slot.perp_market is not None]
@property
def perp_markets_by_index(self) -> typing.Sequence[typing.Optional[GroupSlotPerpMarket]]:
return [slot.perp_market if slot is not None else None for slot in self.slots_by_index]
@staticmethod
def from_layout(layout: typing.Any, name: str, account_info: AccountInfo, version: Version, instrument_lookup: InstrumentLookup, market_lookup: MarketLookup) -> "Group":
meta_data: Metadata = Metadata.from_layout(layout.meta_data)
tokens: typing.List[typing.Optional[TokenBank]] = [
TokenBank.from_layout_or_none(t, instrument_lookup) for t in layout.tokens]
# By convention, the shared quote token is always at the end.
quote_token_bank: typing.Optional[TokenBank] = tokens[-1]
if quote_token_bank is None:
raise Exception("Could not find quote token info at end of group tokens.")
slots: typing.List[GroupSlot] = []
in_slots: typing.List[bool] = []
for index in range(len(tokens) - 1):
spot_market_info: typing.Optional[GroupSlotSpotMarket] = GroupSlotSpotMarket.from_layout_or_none(
layout.spot_markets[index])
perp_market_info: typing.Optional[GroupSlotPerpMarket] = GroupSlotPerpMarket.from_layout_or_none(
layout.perp_markets[index])
if (spot_market_info is None) and (perp_market_info is None):
in_slots += [False]
else:
perp_lot_size_converter: LotSizeConverter = RaisingLotSizeConverter()
base_token_bank: typing.Optional[TokenBank] = tokens[index]
base_instrument: Instrument
if base_token_bank is not None:
base_instrument = base_token_bank.token
else:
# It's possible there's no underlying SPL token and we have a pure PERP market.
if perp_market_info is None:
raise Exception(f"Cannot find base token or perp market info for index {index}")
perp_market = market_lookup.find_by_address(perp_market_info.address)
if perp_market is None:
raise Exception(f"Cannot find base token or perp market for index {index}")
base_instrument = perp_market.base
if perp_market_info is not None:
perp_lot_size_converter = LotSizeConverter(
base_instrument, perp_market_info.base_lot_size, quote_token_bank.token, perp_market_info.quote_lot_size)
oracle: PublicKey = layout.oracles[index]
slot: GroupSlot = GroupSlot(index, base_instrument, base_token_bank, quote_token_bank,
spot_market_info, perp_market_info, perp_lot_size_converter, oracle)
slots += [slot]
in_slots += [True]
signer_nonce: Decimal = layout.signer_nonce
signer_key: PublicKey = layout.signer_key
admin: PublicKey = layout.admin
serum_program_address: PublicKey = layout.serum_program_address
cache_address: PublicKey = layout.cache
valid_interval: Decimal = layout.valid_interval
insurance_vault: PublicKey = layout.insurance_vault
srm_vault: PublicKey = layout.srm_vault
msrm_vault: PublicKey = layout.msrm_vault
fees_vault: PublicKey = layout.fees_vault
return Group(account_info, version, name, meta_data, quote_token_bank, in_slots, slots, signer_nonce, signer_key, admin, serum_program_address, cache_address, valid_interval, insurance_vault, srm_vault, msrm_vault, fees_vault)
@staticmethod
def parse(account_info: AccountInfo, name: str, instrument_lookup: InstrumentLookup, market_lookup: MarketLookup) -> "Group":
data = account_info.data
if len(data) != layouts.GROUP.sizeof():
raise Exception(
f"Group data length ({len(data)}) does not match expected size ({layouts.GROUP.sizeof()})")
layout = layouts.GROUP.parse(data)
return Group.from_layout(layout, name, account_info, Version.V3, instrument_lookup, market_lookup)
@staticmethod
def parse_with_context(context: Context, account_info: AccountInfo) -> "Group":
name = context.lookup_group_name(account_info.address)
return Group.parse(account_info, name, context.instrument_lookup, context.market_lookup)
@staticmethod
def load(context: Context, address: typing.Optional[PublicKey] = None) -> "Group":
group_address: PublicKey = address or context.group_address
account_info = AccountInfo.load(context, group_address)
if account_info is None:
raise Exception(f"Group account not found at address '{group_address}'")
name = context.lookup_group_name(account_info.address)
return Group.parse(account_info, name, context.instrument_lookup, context.market_lookup)
def slot_by_spot_market_address(self, spot_market_address: PublicKey) -> GroupSlot:
for slot in self.slots:
if slot.spot_market is not None and slot.spot_market.address == spot_market_address:
return slot
raise Exception(f"Could not find spot market {spot_market_address} in group {self.address}")
def slot_by_perp_market_address(self, perp_market_address: PublicKey) -> GroupSlot:
for slot in self.slots:
if slot.perp_market is not None and slot.perp_market.address == perp_market_address:
return slot
raise Exception(f"Could not find perp market {perp_market_address} in group {self.address}")
def slot_by_instrument_or_none(self, instrument: Instrument) -> typing.Optional[GroupSlot]:
for slot in self.slots:
if slot.base_instrument == instrument:
return slot
return None
def slot_by_instrument(self, instrument: Instrument) -> GroupSlot:
slot: typing.Optional[GroupSlot] = self.slot_by_instrument_or_none(instrument)
if slot is not None:
return slot
raise Exception(f"Could not find slot for {instrument} in group {self.address}")
def token_bank_by_instrument(self, instrument: Instrument) -> TokenBank:
for token_bank in self.tokens:
if token_bank.token == instrument:
return token_bank
raise Exception(f"Could not find token {instrument} in group {self.address}")
def token_price_from_cache(self, cache: Cache, token: Instrument) -> InstrumentValue:
market_cache: MarketCache = self.market_cache_from_cache(cache, token)
return market_cache.adjusted_price(token, self.shared_quote_token)
def perp_market_cache_from_cache(self, cache: Cache, token: Instrument) -> typing.Optional[PerpMarketCache]:
market_cache: MarketCache = self.market_cache_from_cache(cache, token)
return market_cache.perp_market
def market_cache_from_cache(self, cache: Cache, instrument: Instrument) -> MarketCache:
slot: GroupSlot = self.slot_by_instrument(instrument)
instrument_index: int = slot.index
return cache.market_cache_for_index(instrument_index)
def fetch_cache(self, context: Context) -> Cache:
return Cache.load(context, self.cache)
def __str__(self) -> str:
slot_count = len(self.slots)
slots = "\n ".join([f"{item}".replace("\n", "\n ") for item in self.slots])
return f"""« 𝙶𝚛𝚘𝚞𝚙 {self.version} [{self.address}]
{self.meta_data}
Name: {self.name}
Signer [Nonce: {self.signer_nonce}]: {self.signer_key}
Admin: {self.admin}
DEX Program ID: {self.serum_program_address}
Cache: {self.cache}
Insurance Vault: {self.insurance_vault}
SRM Vault: {self.srm_vault}
MSRM Vault: {self.msrm_vault}
Fees Vault: {self.fees_vault}
Valid Interval: {self.valid_interval}
Basket [{slot_count} markets]:
{slots}
»"""
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
排序算法:
O(n^2)
冒泡排序
插入排序
选择排序
Q(n log n) 分而治之
快速排序
归并排序
"""
|
from logging import getLogger
from typing import (
List,
Optional,
Set,
)
from pydantic import (
BaseModel,
Extra,
Field,
)
from galaxy import exceptions
from galaxy.datatypes.registry import Registry
from galaxy.managers.collections import DatasetCollectionManager
from galaxy.managers.collections_util import (
api_payload_to_create_params,
dictify_dataset_collection_instance,
dictify_element_reference,
)
from galaxy.managers.context import ProvidesHistoryContext
from galaxy.managers.hdcas import HDCAManager
from galaxy.managers.histories import HistoryManager
from galaxy.schema.fields import (
EncodedDatabaseIdField,
ModelClassField,
)
from galaxy.schema.schema import (
AnyHDCA,
CreateNewCollectionPayload,
DatasetCollectionInstanceType,
DCESummary,
DCEType,
HDCADetailed,
TagCollection,
)
from galaxy.security.idencoding import IdEncodingHelper
from galaxy.webapps.base.controller import UsesLibraryMixinItems
from galaxy.webapps.galaxy.services.base import ServiceBase
log = getLogger(__name__)
class UpdateCollectionAttributePayload(BaseModel):
"""Contains attributes that can be updated for all elements in a dataset collection."""
dbkey: str = Field(..., description="TODO")
class Config:
        extra = Extra.forbid  # will cause validation to fail if extra attributes are included
class DatasetCollectionAttributesResult(BaseModel):
dbkey: str = Field(..., description="TODO")
# Are the following fields really used/needed?
extension: str = Field(..., description="The dataset file extension.", example="txt")
model_class: str = ModelClassField("HistoryDatasetCollectionAssociation")
dbkeys: Optional[Set[str]]
extensions: Optional[Set[str]]
tags: TagCollection
class SuitableConverter(BaseModel):
tool_id: str = Field(..., description="The ID of the tool that can perform the type conversion.")
name: str = Field(..., description="The name of the converter.")
target_type: str = Field(..., description="The type to convert to.")
original_type: str = Field(..., description="The type to convert from.")
class SuitableConverters(BaseModel):
"""Collection of converters that can be used on a particular dataset collection."""
__root__: List[SuitableConverter]
class DatasetCollectionContentElements(BaseModel):
"""Represents a collection of elements contained in the dataset collection."""
__root__: List[DCESummary]
class DatasetCollectionsService(ServiceBase, UsesLibraryMixinItems):
def __init__(
self,
security: IdEncodingHelper,
history_manager: HistoryManager,
hdca_manager: HDCAManager,
collection_manager: DatasetCollectionManager,
datatypes_registry: Registry,
):
super().__init__(security)
self.history_manager = history_manager
self.hdca_manager = hdca_manager
self.collection_manager = collection_manager
self.datatypes_registry = datatypes_registry
def create(self, trans: ProvidesHistoryContext, payload: CreateNewCollectionPayload) -> HDCADetailed:
"""
Create a new dataset collection instance.
:type payload: dict
:param payload: (optional) dictionary structure containing:
* collection_type: dataset collection type to create.
* instance_type: Instance type - 'history' or 'library'.
        * name: the new dataset collection's name
* datasets: object describing datasets for collection
:rtype: dict
:returns: element view of new dataset collection
"""
# TODO: Error handling...
create_params = api_payload_to_create_params(payload.dict(exclude_unset=True))
if payload.instance_type == DatasetCollectionInstanceType.history:
if payload.history_id is None:
raise exceptions.RequestParameterInvalidException("Parameter history_id is required.")
history_id = self.decode_id(payload.history_id)
history = self.history_manager.get_owned(history_id, trans.user, current_history=trans.history)
create_params["parent"] = history
create_params["history"] = history
elif payload.instance_type == DatasetCollectionInstanceType.library:
library_folder = self.get_library_folder(trans, payload.folder_id, check_accessible=True)
self.check_user_can_add_to_library_item(trans, library_folder, check_accessible=False)
create_params["parent"] = library_folder
else:
raise exceptions.RequestParameterInvalidException()
dataset_collection_instance = self.collection_manager.create(trans=trans, **create_params)
rval = dictify_dataset_collection_instance(
dataset_collection_instance,
security=trans.security,
url_builder=trans.url_builder,
parent=create_params["parent"],
)
return rval
def copy(
self, trans: ProvidesHistoryContext, id: EncodedDatabaseIdField, payload: UpdateCollectionAttributePayload
):
"""
Iterate over all datasets of a collection and copy datasets with new attributes to a new collection.
        e.g. attributes = {'dbkey': 'dm3'}
"""
self.collection_manager.copy(
trans, trans.history, "hdca", id, copy_elements=True, dataset_instance_attributes=payload.dict()
)
def attributes(
self,
trans: ProvidesHistoryContext,
id: EncodedDatabaseIdField,
instance_type: DatasetCollectionInstanceType = DatasetCollectionInstanceType.history,
) -> DatasetCollectionAttributesResult:
"""
Returns dbkey/extension for collection elements
"""
dataset_collection_instance = self.collection_manager.get_dataset_collection_instance(
trans, id=id, instance_type=instance_type, check_ownership=True
)
rval = dataset_collection_instance.to_dict(view="dbkeysandextensions")
return rval
def suitable_converters(
self,
trans: ProvidesHistoryContext,
id: EncodedDatabaseIdField,
instance_type: DatasetCollectionInstanceType = DatasetCollectionInstanceType.history,
) -> SuitableConverters:
"""
Returns suitable converters for all datatypes in collection
"""
rval = self.collection_manager.get_converters_for_collection(trans, id, self.datatypes_registry, instance_type)
return rval
def show(
self,
trans: ProvidesHistoryContext,
id: EncodedDatabaseIdField,
instance_type: DatasetCollectionInstanceType = DatasetCollectionInstanceType.history,
) -> AnyHDCA:
"""
Returns information about a particular dataset collection.
"""
dataset_collection_instance = self.collection_manager.get_dataset_collection_instance(
trans,
id=id,
instance_type=instance_type,
)
if instance_type == DatasetCollectionInstanceType.history:
parent = dataset_collection_instance.history
elif instance_type == DatasetCollectionInstanceType.library:
parent = dataset_collection_instance.folder
else:
raise exceptions.RequestParameterInvalidException()
rval = dictify_dataset_collection_instance(
dataset_collection_instance,
security=trans.security,
url_builder=trans.url_builder,
parent=parent,
view="element",
)
return rval
def contents(
self,
trans: ProvidesHistoryContext,
hdca_id: EncodedDatabaseIdField,
parent_id: EncodedDatabaseIdField,
instance_type: DatasetCollectionInstanceType = DatasetCollectionInstanceType.history,
limit: Optional[int] = None,
offset: Optional[int] = None,
) -> DatasetCollectionContentElements:
"""
Shows direct child contents of indicated dataset collection parent id
        :type hdca_id: encoded string id
        :param hdca_id: HDCA.id
        :type parent_id: encoded string id
        :param parent_id: parent dataset_collection.id for the dataset contents to be viewed
        :type limit: int
        :param limit: pagination limit for returned dataset collection elements
        :type offset: int
        :param offset: pagination offset for returned dataset collection elements
:rtype: list
:returns: list of dataset collection elements and contents
"""
# validate HDCA for current user, will throw error if not permitted
# TODO: refactor get_dataset_collection_instance
hdca = self.collection_manager.get_dataset_collection_instance(
trans, id=hdca_id, check_ownership=True, instance_type=instance_type
)
# check to make sure the dsc is part of the validated hdca
decoded_parent_id = self.decode_id(parent_id)
if parent_id != hdca_id and not hdca.contains_collection(decoded_parent_id):
raise exceptions.ObjectNotFound(
"Requested dataset collection is not contained within indicated history content"
)
# retrieve contents
contents = self.collection_manager.get_collection_contents(trans, decoded_parent_id, limit=limit, offset=offset)
# dictify and tack on a collection_url for drilling down into nested collections
def serialize_element(dsc_element) -> DCESummary:
result = dictify_element_reference(dsc_element, recursive=False, security=trans.security)
if result["element_type"] == DCEType.dataset_collection:
assert trans.url_builder
result["object"]["contents_url"] = trans.url_builder(
"contents_dataset_collection",
hdca_id=self.encode_id(hdca.id),
parent_id=self.encode_id(result["object"]["id"]),
)
trans.security.encode_all_ids(result, recursive=True)
return result
rval = [serialize_element(el) for el in contents]
return DatasetCollectionContentElements.parse_obj(rval)
|
# Generated by Django 2.2.10 on 2020-03-04 23:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dcim', '0101_auto_20200304_1403'),
]
operations = [
migrations.CreateModel(
name='ApplicationType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)),
('created', models.DateField(auto_now_add=True, null=True)),
('last_updated', models.DateTimeField(auto_now=True, null=True)),
('name', models.CharField(max_length=100, unique=True)),
('slug', models.SlugField(max_length=100, unique=True)),
('language', models.CharField(blank=True, max_length=50, null=True)),
('actual_version', models.CharField(blank=True, max_length=50)),
('recommended_version', models.CharField(max_length=100)),
],
options={
'ordering': ['name', 'slug', 'language', 'actual_version', 'recommended_version'],
},
),
]
|
# -*- coding:utf-8 -*-
"""Test function for vega.run"""
import unittest
def lazy(func):
"""lazy function wrapper
:param func: function name
"""
attr_name = "_lazy_" + func.__name__
def lazy_func(*args, **kwargs):
"""Wrapper of lazy func
:param args: any object
:param kwargs: any kwargs
:return:
"""
if not hasattr(func, attr_name):
setattr(func, attr_name, func(*args, **kwargs))
return getattr(func, attr_name)
return lazy_func
@lazy
def env_args(args):
"""A lazy function will be execute when call
:param args: any object
:return:
"""
return args
class TestPipeline(unittest.TestCase):
"""Test lazy function worked in pipeline"""
def test_env_args(self):
"""Test function 'env_args' is a lazy function"""
args = {'env': 'test'}
env_args(args)
self.assertEqual(env_args(), {'env': 'test'})
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Almost every FBCodeBuilder string is ultimately passed to a shell. Escaping
too little or too much tends to be the most common error. The utilities in
this file give a systematic way of avoiding such bugs:
- When you write literal strings destined for the shell, use `ShellQuoted`.
- When these literal strings are parameterized, use `ShellQuoted.format`.
- Any parameters that are raw strings get `shell_quote`d automatically,
while any ShellQuoted parameters will be left intact.
- Use `path_join` to join path components.
- Use `shell_join` to join already-quoted command arguments or shell lines.
"""
import os
from collections import namedtuple
# pyre-fixme[13] This is too magical for Pyre.
class ShellQuoted(namedtuple("ShellQuoted", ("do_not_use_raw_str",))):
"""
Wrap a string with this to make it transparent to shell_quote(). It
will almost always suffice to use ShellQuoted.format(), path_join(),
or shell_join().
If you really must, use raw_shell() to access the raw string.
"""
def __new__(cls, s):
"No need to nest ShellQuoted."
return super(ShellQuoted, cls).__new__(
cls, s.do_not_use_raw_str if isinstance(s, ShellQuoted) else s
)
def __str__(self):
raise RuntimeError(
"One does not simply convert {0} to a string -- use path_join() "
"or ShellQuoted.format() instead".format(repr(self))
)
def __repr__(self) -> str:
return "{0}({1})".format(self.__class__.__name__, repr(self.do_not_use_raw_str))
def format(self, **kwargs) -> "ShellQuoted":
"""
Use instead of str.format() when the arguments are either
`ShellQuoted()` or raw strings needing to be `shell_quote()`d.
Positional args are deliberately not supported since they are more
error-prone.
"""
return ShellQuoted(
self.do_not_use_raw_str.format(
**dict(
(k, shell_quote(v).do_not_use_raw_str) for k, v in kwargs.items()
)
)
)
def shell_quote(s) -> ShellQuoted:
"Quotes a string if it is not already quoted"
return (
s
if isinstance(s, ShellQuoted)
else ShellQuoted("'" + str(s).replace("'", "'\\''") + "'")
)
def raw_shell(s: ShellQuoted):
"Not a member of ShellQuoted so we get a useful error for raw strings"
if isinstance(s, ShellQuoted):
return s.do_not_use_raw_str
raise RuntimeError("{0} should have been ShellQuoted".format(s))
def shell_join(delim, it) -> ShellQuoted:
"Joins an iterable of ShellQuoted with a delimiter between each two"
return ShellQuoted(delim.join(raw_shell(s) for s in it))
def path_join(*args) -> ShellQuoted:
"Joins ShellQuoted and raw pieces of paths to make a shell-quoted path"
return ShellQuoted(os.path.join(*[raw_shell(shell_quote(s)) for s in args]))
def shell_comment(c: ShellQuoted) -> ShellQuoted:
"Do not shell-escape raw strings in comments, but do handle line breaks."
return ShellQuoted("# {c}").format(
c=ShellQuoted(
(raw_shell(c) if isinstance(c, ShellQuoted) else c).replace("\n", "\n# ")
)
)
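
# Hedged usage sketch (not part of the original module): demonstrates the
# quoting rules the module docstring describes.
if __name__ == "__main__":
    cmd = ShellQuoted("rm -f {path}").format(path="it's here")
    print(raw_shell(cmd))  # rm -f 'it'\''s here'
    script = shell_join(" && ", [ShellQuoted("cd /tmp"), cmd])
    print(raw_shell(script))  # cd /tmp && rm -f 'it'\''s here'
    print(raw_shell(path_join("/tmp", "a dir", "file.txt")))  # '/tmp'/'a dir'/'file.txt'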
|
"""Assortment of utilities.
"""
from datetime import datetime
from hashlib import sha256, md5
import random
import re
import string
import unicodedata
epoch = datetime.utcfromtimestamp(0)
def unix_time(dt=None):
"""Returns the UNIX time representation for a given date or current UTC."""
dt = dt or datetime.utcnow()
return int((dt - epoch).total_seconds())
def unix_time_millis(dt=None):
"""Returns the UNIX time representation for a given date or current UTC."""
dt = dt or datetime.utcnow()
return int((dt - epoch).total_seconds() * 1000)
def from_iso_to_unix(iso_dt):
"""Converts from ISO format (e.g., "1984-06-02T19:05:00.000Z") to UNIX.
"""
if isinstance(iso_dt, int):
return iso_dt # Already UNIX.
try:
dt = datetime.strptime(iso_dt, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
dt = datetime.strptime(iso_dt, '%Y-%m-%dT%H:%M:%SZ')
return unix_time(dt)
def from_unix_to_iso(dt=None):
"""Returns the ISO representation for a given date or current UTC."""
dt = int(dt) if dt is not None else unix_time()
return datetime.utcfromtimestamp(dt).strftime('%Y-%m-%dT%H:%M:%SZ')
def datetime_from_unix_time(unix_dt):
"""Returns a datetime object."""
return datetime.utcfromtimestamp(unix_dt)
def datetime_from_iso(iso_dt):
"""Returns a datetime object."""
return datetime.strptime(iso_dt, '%Y-%m-%dT%H:%M:%SZ')
def password_hash(password, salt):
    # hashlib digests require bytes in Python 3, so encode the str inputs.
    if salt:
        return sha256((password + salt).encode('utf-8')).hexdigest()
    else:
        return md5(password.encode('utf-8')).hexdigest()[::-1]
def random_string(size):
return ''.join(random.SystemRandom().choice(
string.ascii_letters + string.digits) for _ in range(size))
def remove_accents(input_str):
"""Replaces accented chars by their ascii equivalents."""
nfkd_form = unicodedata.normalize('NFKD', input_str)
return u"".join([c for c in nfkd_form if not unicodedata.combining(c)])
def make_url_friendly(input_str):
"""Returns a URL-friedly version of input_str.
Removes non-word chars, replaces accented ones and makes it lowercase.
"""
if input_str is None:
return None
return re.sub(r'[\W\\/_]+', '-', remove_accents(input_str)).lower()
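
# Hedged usage sketch (not part of the original module): round-tripping a
# timestamp through the ISO/UNIX helpers above (all values are UTC).
if __name__ == "__main__":
    ts = from_iso_to_unix('1984-06-02T19:05:00.000Z')
    print(ts)  # 455051100
    print(from_unix_to_iso(ts))  # 1984-06-02T19:05:00Z
    print(make_url_friendly('Crème Brûlée / recipe_01'))  # creme-brulee-recipe-01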
|
"""Copyright (c) 2021, Deep Net. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
EPSILON = 1e-20
|
print('Hello World!')
|
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 153750.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
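
# Hedged usage sketch (not part of the exported model): one way to integrate
# the ODEs with PySB's SciPy-based simulator, assuming pysb, numpy and scipy
# are installed. `model` is self-exported into this module by the Model() call above.
if __name__ == "__main__":
    import numpy as np
    from pysb.simulator import ScipyOdeSimulator
    tspan = np.linspace(0, 20000, 100)  # simulation time points
    result = ScipyOdeSimulator(model, tspan=tspan).run()
    # Cleaved PARP is a common apoptosis readout; print its final amount.
    print(result.observables["ParpC_obs"][-1])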
|
from __future__ import annotations
import typing as t
from functools import singledispatch
from inflection import underscore
from sqlalchemy import Date
from sqlalchemy import DateTime
from sqlalchemy import Text
from sqlalchemy import Time
from sqlalchemy import Unicode
from sqlalchemy import UnicodeText
from sqlalchemy.orm import RelationshipProperty
from sqlalchemy.types import Boolean
from sqlalchemy.types import Float
from sqlalchemy.types import Integer
from sqlalchemy.types import Numeric
from sqlalchemy.types import String
from sqlalchemy.types import VARCHAR
from sqlalchemy_utils import EmailType
from sqlalchemy_utils import get_mapper
from sqlalchemy_utils import JSONType
from sqlalchemy_utils import PhoneNumberType
from sqlalchemy_utils import URLType
from sqlalchemy_utils.types import ChoiceType
from .definitions import MagqlEnumType
from .definitions import MagqlInputField
from .definitions import MagqlInputObjectType
StringFilter = MagqlInputObjectType(
"StringFilter",
{
"operator": MagqlInputField(
MagqlEnumType(
"StringOperator",
{
"INCLUDES": "INCLUDES",
"EQUALS": "EQUALS",
"EXISTS": "EXISTS",
"DOESNOTEXIST": "DOESNOTEXIST",
},
)
),
"value": MagqlInputField("String"),
},
)
DateFilter = MagqlInputObjectType(
"DateFilter",
{
"operator": MagqlInputField(
MagqlEnumType(
"DateOperator", {"BEFORE": "BEFORE", "ON": "ON", "AFTER": "AFTER"}
)
),
"value": MagqlInputField("String"),
},
)
IntFilter = MagqlInputObjectType(
"IntFilter",
{
"operator": MagqlInputField(
MagqlEnumType(
"IntOperator",
{
"lt": "lt",
"lte": "lte",
"eq": "eq",
"neq": "neq",
"gt": "gt",
"gte": "gte",
},
)
),
"value": MagqlInputField("Int"),
},
)
FloatFilter = MagqlInputObjectType(
"FloatFilter",
{
"operator": MagqlInputField(
MagqlEnumType(
"FloatOperator",
{
"lt": "lt",
"lte": "lte",
"eq": "eq",
"neq": "neq",
"gt": "gt",
"gte": "gte",
},
)
),
"value": MagqlInputField("Float"),
},
)
RelFilter = MagqlInputObjectType(
"RelFilter",
{
"operator": MagqlInputField(
MagqlEnumType("RelOperator", {"INCLUDES": "INCLUDES"})
),
"value": MagqlInputField("Int"),
},
)
BooleanFilter = MagqlInputObjectType(
"BooleanFilter",
{
"operator": MagqlInputField(
MagqlEnumType(
"BooleanOperator", {"EQUALS": "EQUALS", "NOTEQUALS": "NOTEQUALS"}
)
),
"value": MagqlInputField("Boolean"),
},
)
EnumOperator = MagqlEnumType("EnumOperator", {"INCLUDES": "INCLUDES"})
def EnumFilter(base_type: t.Any) -> MagqlInputObjectType:
name = base_type.name + "Filter"
input_ = {
"operator": MagqlInputField(EnumOperator),
"value": MagqlInputField(base_type),
}
return MagqlInputObjectType(name, input_)
@singledispatch
def get_filter_comparator(type: t.Any) -> t.Any:
raise TypeError(f"No comparator registered for {type.__class__.__name__!r}.")
@get_filter_comparator.register(RelationshipProperty)
def _get_relationship_comparator(rel: RelationshipProperty) -> t.Optional[t.Callable]:
direction = rel.direction.name
if "TOONE" in direction:
def condition(filter_value: t.Any, filter_operator: str, field: t.Any) -> t.Any:
if filter_operator == "INCLUDES":
return field == filter_value
return None
return condition
elif "TOMANY" in direction:
def condition(filter_value: t.Any, filter_operator: str, field: t.Any) -> t.Any:
if filter_operator == "INCLUDES":
return field.any(field.contains(filter_value))
return None
return condition
return None
@get_filter_comparator.register(DateTime)
@get_filter_comparator.register(Date)
def _get_date_comparator(_: t.Union[DateTime, Date]) -> t.Callable:
def condition(
filter_value: t.Union[DateTime, Date],
filter_operator: str,
field: t.Union[DateTime, Date],
) -> t.Any:
if filter_operator == "BEFORE":
return field < filter_value
elif filter_operator == "ON":
return field == filter_value
elif filter_operator == "After":
return field > filter_value
return None
return condition
@get_filter_comparator.register(JSONType)
@get_filter_comparator.register(Text)
@get_filter_comparator.register(UnicodeText)
@get_filter_comparator.register(Unicode)
@get_filter_comparator.register(URLType)
@get_filter_comparator.register(PhoneNumberType)
@get_filter_comparator.register(EmailType)
@get_filter_comparator.register(Time)
@get_filter_comparator.register(String)
@get_filter_comparator.register(VARCHAR)
def _get_string_comparator(_: t.Any) -> t.Callable:
def condition(filter_value: t.Any, filter_operator: str, field: t.Any) -> t.Any:
if filter_operator == "INCLUDES":
return field.like(f"%{filter_value}%")
elif filter_operator == "EQUALS":
return field == filter_value
elif filter_operator == "EXISTS":
return field.like("%")
elif filter_operator == "DOESNOTEXIST":
return field.is_(None)
return condition
@get_filter_comparator.register(Float)
@get_filter_comparator.register(Numeric)
@get_filter_comparator.register(Integer)
def _get_number_comparator(_: t.Any) -> t.Callable:
def condition(filter_value: t.Any, filter_operator: str, field: t.Any) -> t.Any:
if filter_operator == "lt":
return field < filter_value
elif filter_operator == "lte":
return field <= filter_value
elif filter_operator == "eq":
return field == filter_value
elif filter_operator == "neq":
return field != filter_value
elif filter_operator == "gt":
return field > filter_value
elif filter_operator == "gte":
return field >= filter_value
return condition
@get_filter_comparator.register(Boolean)
def _get_boolean_comparator(_: t.Any) -> t.Callable:
def condition(filter_value: t.Any, filter_operator: str, field: t.Any) -> t.Any:
if filter_operator == "EQUALS":
return field == filter_value
elif filter_operator == "NOTEQUALS":
return field != filter_value
return condition
@get_filter_comparator.register(ChoiceType)
def _get_choice_comparator(_: t.Any) -> t.Callable:
def condition(filter_value: t.Any, filter_operator: str, field: t.Any) -> t.Any:
if filter_operator == "INCLUDES":
return field == filter_value
return condition
def generate_filters(table: t.Any, info: t.Any, *args: t.Any, **kwargs: t.Any) -> t.Any:
sqla_filters = []
if "filter" in kwargs and kwargs["filter"] is not None:
mapper = get_mapper(table)
gql_filters = kwargs["filter"]
for filter_name, gql_filter in gql_filters.items():
gql_filter_value = gql_filter["value"]
filter_name = underscore(filter_name)
if filter_name in table.c:
filter_type = table.c[filter_name].type
elif filter_name in mapper.relationships:
rel = mapper.relationships[filter_name]
rel_mapper = get_mapper(rel.target)
gql_filter_value = (
info.context.query(rel_mapper.class_)
.filter_by(id=gql_filter_value)
.one()
)
filter_type = rel
else:
raise KeyError(filter_name)
sql_filter = get_filter_comparator(filter_type)(
gql_filter_value,
gql_filter["operator"],
getattr(mapper.class_, filter_name),
)
sqla_filters.append(sql_filter)
return sqla_filters
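
# Hedged usage sketch (not part of the original module): the singledispatch
# comparator can be exercised directly against a SQLAlchemy column. The
# `users` table here is made up for illustration.
if __name__ == "__main__":
    from sqlalchemy import Column, MetaData, Table
    users = Table("users", MetaData(), Column("age", Integer()))
    condition = get_filter_comparator(users.c.age.type)
    clause = condition(18, "gte", users.c.age)
    print(clause)  # -> users.age >= :age_1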
|
from .__main__ import create_app
application = create_app()
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""comm_helper"""
from mindspore.parallel._ps_context import _is_role_pserver, _is_role_sched
from ._hccl_management import load_lib as hccl_load_lib
_HCCL_AVAILABLE = False
_NCCL_AVAILABLE = False
try:
import mindspore._ms_mpi as mpi
_NCCL_AVAILABLE = True
except ImportError:
_NCCL_AVAILABLE = False
try:
hccl_load_lib()
_HCCL_AVAILABLE = True
except RuntimeError:
_HCCL_AVAILABLE = False
if _HCCL_AVAILABLE:
from . import _hccl_management as hccl
else:
try:
import hccl_test.manage.api as hccl
_HCCL_AVAILABLE = True
except ImportError:
_HCCL_AVAILABLE = False
HCCL_WORLD_COMM_GROUP = "hccl_world_group"
NCCL_WORLD_COMM_GROUP = "nccl_world_group"
class Backend:
"""
Class for available backends.
Note:
The backends' value should be string, e.g., "hccl".
        If backend is set to Backend.UNDEFINED, it will be seen as invalid.
Args:
name (str): The name of backend.
Raises:
TypeError: If name is not a string.
ValueError: If backend is invalid.
Examples:
>>> Backend("abc")
>>> hccl = Backend("hccl")
"""
UNDEFINED = "undefined"
HCCL = "hccl"
NCCL = "nccl"
def __new__(cls, name):
"""Create instance object of Backend."""
if not isinstance(name, str):
raise TypeError("Backend name must be a string, but got {}".format(type(name)))
value = getattr(Backend, name.upper(), Backend.UNDEFINED)
if value == Backend.UNDEFINED:
raise ValueError("Invalid backend: '{}'".format(name))
return value
def is_hccl_available():
"""
Check hccl api is available.
Returns:
Boolean. Return whether hccl is available or not.
"""
return _HCCL_AVAILABLE
def is_nccl_available():
"""
Check nccl api is available.
Returns:
Boolean. Return whether nccl is available or not.
"""
return _NCCL_AVAILABLE
def check_parameter_available(func):
"""
Check parameter is available. If not available, raise Error.
Args:
func (Function): The function to be run.
Raises:
RuntimeError.
Returns:
Wrapper. If not available, raise Error.
"""
def wrapper(*args, **kargs):
if _is_role_pserver() or _is_role_sched():
return func(*args, **kargs)
group = None
if "group" in kargs.keys():
group = kargs.get("group")
if group is not None and not isinstance(group, str):
raise TypeError("Group should be str or None, "
"but got group {}".format(type(group)))
if "backend" in kargs.keys():
backend = kargs.get("backend")
if backend is Backend.HCCL and not is_hccl_available():
raise RuntimeError("Distributed Communication doesn't have HCCL built in")
if backend is Backend.NCCL and not is_nccl_available():
raise RuntimeError("Distributed Communication doesn't have NCCL built in")
if group is None:
if backend is Backend.HCCL:
group = HCCL_WORLD_COMM_GROUP
elif backend is Backend.NCCL:
group = NCCL_WORLD_COMM_GROUP
return func(*args, **kargs)
return wrapper
@check_parameter_available
def _get_rank_helper(group, backend):
"""
The Helper to do get_rank_id.
Args:
group (str): The communication group.
backend (str): The backend, like "hccl".
Raises:
ValueError: If backend is invalid.
Returns:
Integer. The local rank id of the calling process.
"""
rank_id = None
if _is_role_pserver() or _is_role_sched():
rank_id = 0
return rank_id
if backend == Backend.HCCL:
if group == HCCL_WORLD_COMM_GROUP:
rank_id = hccl.get_rank_id()
else:
rank_id = hccl.get_rank_id(group)
elif backend == Backend.NCCL:
rank_id = mpi.get_rank_id(group)
else:
raise ValueError("Invalid backend: '{}'".format(backend))
return rank_id
@check_parameter_available
def _get_local_rank_helper(group, backend):
"""
The Helper to do get_local_rank_id.
Args:
group (str): The communication group.
backend (str): The backend, like "hccl".
Raises:
ValueError: If backend is invalid.
Returns:
Integer. The local rank id of the calling process.
"""
rank_id = None
if backend == Backend.HCCL:
if group == HCCL_WORLD_COMM_GROUP:
rank_id = hccl.get_local_rank_id()
else:
rank_id = hccl.get_local_rank_id(group)
elif backend == Backend.NCCL:
raise RuntimeError("Nccl doesn't support get_local_rank_id now.")
else:
raise ValueError("Invalid backend: '{}'".format(backend))
return rank_id
@check_parameter_available
def _get_size_helper(group, backend):
"""
The Helper to do get_rank_size.
Args:
group (str): The communication group.
backend (str): The backend, like "hccl".
Raises:
ValueError: If backend is invalid.
Returns:
Integer. The rank size of specified group.
"""
size = None
if _is_role_pserver() or _is_role_sched():
size = 1
return size
if backend == Backend.HCCL:
if group == HCCL_WORLD_COMM_GROUP:
size = hccl.get_rank_size()
else:
size = hccl.get_rank_size(group)
elif backend == Backend.NCCL:
size = mpi.get_rank_size(group)
else:
raise ValueError("Invalid backend: '{}'".format(backend))
return size
@check_parameter_available
def _get_local_size_helper(group, backend):
"""
The Helper to do get_local_rank_size.
Args:
group (str): The communication group.
backend (str): The backend, like "hccl".
Raises:
ValueError: If backend is invalid.
Returns:
Integer. The local rank size where the calling process is being within specified group.
"""
size = None
if backend == Backend.HCCL:
if group == HCCL_WORLD_COMM_GROUP:
size = hccl.get_local_rank_size()
else:
size = hccl.get_local_rank_size(group)
elif backend == Backend.NCCL:
raise RuntimeError("Nccl doesn't support get_local_rank_size now.")
else:
raise ValueError("Invalid backend: '{}'".format(backend))
return size
@check_parameter_available
def _get_world_rank_from_group_rank_helper(group, group_rank_id, backend):
"""
The Helper to do get_world_rank_from_group_rank.
Args:
group (str): The user communication group.
group_rank_id (int): A rank id in user communication group.
backend (str): The backend, like "hccl".
Raises:
TypeError: If group_rank_id is not int.
ValueError: If group is "hccl_world_group" or backend is invalid.
Returns:
Integer. A rank id in world communication group.
"""
world_rank_id = None
if not isinstance(group_rank_id, int):
raise TypeError("group_rank_id should be int, but got type {}".format(type(group_rank_id)))
if backend == Backend.HCCL:
if group == HCCL_WORLD_COMM_GROUP:
raise ValueError("Group cannot be 'hccl_world_group'. ")
world_rank_id = hccl.get_world_rank_from_group_rank(group, group_rank_id)
elif backend == Backend.NCCL:
raise RuntimeError("Nccl doesn't support get_world_rank_from_group_rank now.")
else:
raise ValueError("Invalid backend: '{}'".format(backend))
return world_rank_id
@check_parameter_available
def _get_group_rank_from_world_rank_helper(world_rank_id, group, backend):
"""
The Helper to do get_group_rank_from_world_rank.
Args:
world_rank_id (int): A rank id in world communication group.
group (str): The user communication group.
backend (str): The backend, like "hccl".
Raises:
TypeError: If world_rank_id is not int.
ValueError: If group is 'hccl_world_group' or backend is invalid.
Returns:
Integer. A rank id in user communication group.
"""
group_rank_id = None
if not isinstance(world_rank_id, int):
raise TypeError("world_rank_id should be int, but got type {}".format(type(world_rank_id)))
if backend == Backend.HCCL:
if group == HCCL_WORLD_COMM_GROUP:
raise ValueError("Group cannot be 'hccl_world_group'. ")
group_rank_id = hccl.get_group_rank_from_world_rank(world_rank_id, group)
elif backend == Backend.NCCL:
raise RuntimeError("Nccl doesn't support get_group_rank_from_world_rank now.")
else:
raise ValueError("Invalid backend: '{}'".format(backend))
return group_rank_id
@check_parameter_available
def _create_group_helper(group, rank_ids, backend):
"""
The Helper to do create_group.
Args:
group (str): The communication group.
rank_ids (list): Rank ids in the group.
backend (str): The backend, like "hccl".
Raises:
TypeError: If rank_ids is not a list.
        ValueError: If rank_ids size is smaller than 1, rank_ids has duplicate data or backend is invalid.
"""
if backend == Backend.HCCL:
if not isinstance(rank_ids, list):
raise TypeError("Rank_ids {} should be list".format(rank_ids))
rank_size = len(rank_ids)
if rank_size < 1:
raise ValueError("Rank_ids size {} should be large than 0".format(rank_size))
if len(rank_ids) - len(list(set(rank_ids))) > 0:
raise ValueError("List rank_ids in Group {} has duplicate data!".format(group))
hccl.create_group(group, rank_size, rank_ids)
elif backend == Backend.NCCL:
raise RuntimeError("Nccl doesn't support create_group now.")
else:
raise ValueError("Invalid backend: '{}'".format(backend))
@check_parameter_available
def _destroy_group_helper(group, backend):
"""
The Helper to do destroy_group.
Args:
group (str): The user communication group.
backend (str): The backend, like "hccl".
Raises:
ValueError: If group is "hccl_world_group" or backend is invalid.
"""
if backend == Backend.HCCL:
if group == HCCL_WORLD_COMM_GROUP:
raise ValueError("The hccl_world_group does not support destruction.")
hccl.destroy_group(group)
elif backend == Backend.NCCL:
raise RuntimeError("Nccl doesn't support destroy_group now.")
else:
raise ValueError("Invalid backend: '{}'".format(backend))
|
from features.arduino_features import BlackrockSerialDIORowByte, SerialDIORowByte
from riglib import experiment
class par(object):
def init(self):
pass
class F(BlackrockSerialDIORowByte, par):
pass
f = F()
f.init()
|
import logging
import chevron
import re
from itertools import dropwhile
import snex.util as util
logger = logging.getLogger(__name__)
DEFAULT = {
# :snippet global-default-config lang: python
"output_template": "```{{lang}}\n{{{snippet}}}\n```\n",
"valid_param_keys": ["name", "lang", "lnum", "fname", "path"],
"output_path": "snippets",
"line_prefix": "",
"comment_prefix": "# ",
"comment_suffix": "",
"snippet_start": ":snippet",
"snippet_end": ":endsnippet",
"cloak_start": ":cloak",
"cloak_end": ":endcloak",
"output_suffix": ".md",
# :endsnippet
}
class Snippet:
@classmethod
def from_raw_data(cls, params, data, origin=None, prefix=""):
(head,) = data[:1]
(tail,) = data[-1:]
body = data[1:-1]
idlvl = util.det_indent_lvl(body)
body = (line[idlvl:] for line in body)
body = dropwhile(util.is_empty, body)
body = util.dropwhile_right(util.is_empty, list(body))
body = (prefix + line for line in body)
return cls(params=params, head=head.lstrip(), body=list(body), tail=tail.lstrip(), origin=origin)
@property
def name(self):
return self.params["name"]
@property
def line_number(self):
return self.params.get("lnum", -1)
def __init__(self, params=None, head=None, body=None, tail=None, origin=None):
self.params = params
self.head = head
self.body = body
self.tail = tail
self.origin = origin
def __repr__(self):
return f"Snippet({self.params!r}, {self.head!r}, {self.body!r}, {self.tail!r}, {self.origin!r})"
def render_snippet(template, params, body):
return chevron.render(template, {**params, **{"snippet": "\n".join(body)}})
def get_configs(conf):
configs = conf["config"]
default = configs["default"] if "default" in configs else None
for name in [n for n in configs if not n == "default"]:
c = util.merge_with_default_conf(configs[name], default, global_default=DEFAULT)
# prevent defining a global name for all snippets in the config
c.pop("name", None)
yield (name, c)
def extract_from_path(f, conf, base_path):
comment_prefix = re.escape(conf["comment_prefix"])
comment_suffix = re.escape(conf["comment_suffix"])
line_prefix = conf["line_prefix"]
cloak_start = re.escape(conf["cloak_start"])
cloak_end = re.escape(conf["cloak_end"])
cloak_start_re = f"^\\s*{comment_prefix}{cloak_start}{comment_suffix}$"
cloak_end_re = f"^\\s*{comment_prefix}{cloak_end}{comment_suffix}$"
snippet_start = re.escape(conf["snippet_start"])
snippet_end = re.escape(conf["snippet_end"])
snippet_start_re = f"^\\s*{comment_prefix}{snippet_start}(.*){comment_suffix}$"
snippet_end_re = f"^\\s*{comment_prefix}{snippet_end}{comment_suffix}$"
snippets = []
in_snippet = False
cloaked = False
data = []
params = {}
for idx, line in util.read_path(f):
lnum = idx + 1
if re.search(cloak_end_re, line):
cloaked = False
continue
if re.search(cloak_start_re, line):
cloaked = True
if cloaked:
continue
if match := re.search(snippet_start_re, line):
try:
params = util.construct_params(match.group(1), f, base_path, lnum)
params = util.sanitize_params(params, conf["valid_param_keys"])
except Exception as ex:
logger.error(f"could not parse snippet params: {line} in file {f}:{lnum}")
raise ex
in_snippet = True
if not in_snippet:
continue
data.append(line.rstrip("\n"))
if re.search(snippet_end_re, line):
in_snippet = False
s = Snippet.from_raw_data(params, data, origin=f, prefix=line_prefix)
snippets.append(s)
data = []
params = {}
return snippets
|
__all__ = ('modulize',)
import sys
from types import FunctionType, GetSetDescriptorType, MappingProxyType, ModuleType
from .docs import has_docs
NoneType = type(None)
try:
from _weakref import ref as WeakrefType
except ImportError:
from weakref import ref as WeakrefType
# These 2 types can be functions
WrapperDescriptorType = type(object.__ne__)
MethodDescriptorType = type(object.__format__)
DO_NOT_MODULIZE_TYPES = [MappingProxyType, GetSetDescriptorType]
if WrapperDescriptorType is not FunctionType:
DO_NOT_MODULIZE_TYPES.append(WrapperDescriptorType)
if MethodDescriptorType is not FunctionType:
DO_NOT_MODULIZE_TYPES.append(MethodDescriptorType)
DO_NOT_MODULIZE_TYPES = tuple(DO_NOT_MODULIZE_TYPES)
@has_docs
def _modulize_function(old, globals_, source_module, module_name, module_path):
"""
Changes the given function's scopes and qualname if they were defined inside of a modulized class.
Parameters
----------
old : `function`
A function present inside of a modulized class.
globals_ : `dict` of (`str`, `Any`)
Global variables of the respective module.
source_module : `module`
The module, where the modulized class was defined.
module_name : `str`
The newly created module's name.
module_path : `str`
The newly created module's path.
Returns
-------
new : `function`
Newly recreated function if applicable.
"""
if old.__module__ != source_module:
return old
new = FunctionType(old.__code__, globals_, old.__name__, old.__defaults__, old.__closure__)
new.__module__ = module_path
qualname = old.__qualname__
    if (qualname is not None) and (len(qualname) > len(module_name)) and qualname[len(module_name)] == '.' and \
qualname.startswith(module_name):
new.__qualname__ = qualname[len(module_name) + 1:]
return new
@has_docs
def _modulize_type(klass, globals_, source_module, module_name, module_path):
"""
Changes the given class's scopes and qualname if they were defined inside of a modulized class.
Parameters
----------
klass : `type`
A class present inside of a modulized class.
globals_ : `dict` of (`str`, `Any`)
Global variables of the respective module.
source_module : `module`
The module, where the modulized class was defined.
module_name : `str`
The newly created module's name.
module_path : `str`
The newly created module's path.
"""
if klass.__module__ != source_module:
return
qualname = klass.__qualname__
if (qualname is None) or (len(qualname) <= len(module_name)) or qualname[len(module_name)] != '.' \
or not qualname.startswith(module_name):
return
klass.__qualname__ = qualname[len(module_name) + 1:]
klass.__module__ = module_path
for name in dir(klass):
value = getattr(klass, name)
value_type = value.__class__
if value_type is FunctionType:
value = _modulize_function(value, globals_, source_module, module_name, module_path)
setattr(klass, name, value)
if issubclass(value_type, type):
_modulize_type(value, globals_, source_module, module_name, module_path)
@has_docs
def modulize(klass):
"""
Transforms the given class to a module.
    Every function and class defined inside of the given class, and still present at transformation time, will
    have its global scope modified.
Parameters
----------
klass : `type`
The class to transform to module.
Returns
-------
result_module : `module`
The created module object.
Raises
------
TypeError
If `klass` is not given as `type`.
"""
if not isinstance(klass, type):
raise TypeError(
f'Only types can be modulized, got {klass.__class__.__name__}; {klass!r}.'
)
source_module = klass.__module__
module_name = klass.__name__
module_path = f'{klass.__module__}.{module_name}'
try:
        result_module = sys.modules[module_path]
except KeyError:
result_module = ModuleType(module_path)
sys.modules[module_path] = result_module
globals_ = result_module.__dict__
globals_['__builtins__'] = __builtins__
else:
globals_ = result_module.__dict__
collected_names = []
for name in globals_.keys():
if name.startswith('__') and name.endswith('__'):
continue
collected_names.append(name)
for name in collected_names:
del globals_[name]
globals_['__doc__'] = None
for name in type.__dir__(klass):
if name.startswith('__') and name.endswith('__') and name != '__doc__':
continue
value = type.__getattribute__(klass, name)
value_type = type(value)
if value_type in DO_NOT_MODULIZE_TYPES:
continue
if value_type is FunctionType:
value = _modulize_function(value, globals_, source_module, module_name, module_path)
if issubclass(value_type, type):
_modulize_type(value, globals_, source_module, module_name, module_path)
ModuleType.__setattr__(result_module, name, value)
return result_module
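
# ---------------------------------------------------------------------------
# A minimal usage sketch (the decorated class is hypothetical, not part of
# this library): `modulize` turns a class into a module registered under
# '<defining_module>.<class_name>' in sys.modules.
#
#     @modulize
#     class constants:
#         ANSWER = 42
#
#         def double(x):
#             return x * 2
#
#     constants.ANSWER     # -> 42
#     constants.double(3)  # -> 6, now a plain module-level function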
|
import shutil
import tempfile
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.cache import cache
from django.contrib.auth import get_user_model
from django.test import Client, TestCase, override_settings
from django.urls import reverse
from django import forms
from ..models import Post, Group, Follow
User = get_user_model()
TEMP_MEDIA_ROOT = tempfile.mkdtemp(dir=settings.BASE_DIR)
class PaginatorTest(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = User.objects.create_user(username='NoName')
cls.user_client = Client()
cls.user_client.force_login(cls.user)
cls.count_posts_on_page = 10
cls.count_posts = 15
cls.rest_posts = cls.count_posts % cls.count_posts_on_page
cls.group = Group.objects.create(
            title='Test title',
            slug='test-slug',
            description='Description...'
)
posts = (
Post(
                text='Test post text.',
author=cls.user,
group=cls.group
) for i in range(cls.count_posts)
)
Post.objects.bulk_create(posts, cls.count_posts)
def test_paginator_pages(self):
pages_paginator = [
reverse(
'posts:index'
),
reverse(
'posts:group_list',
kwargs={'slug': PaginatorTest.group.slug}
),
reverse(
'posts:profile',
kwargs={'username': PaginatorTest.user}
)
]
for page in pages_paginator:
with self.subTest(page=page):
response = PaginatorTest.user_client.get(page)
self.assertEqual(
len(response.context['page_obj']), (
PaginatorTest.count_posts_on_page
)
)
response = PaginatorTest.user_client.get(
page + '?page=2'
)
self.assertEqual(
len(response.context['page_obj']),
PaginatorTest.rest_posts
)
@override_settings(MEDIA_ROOT=TEMP_MEDIA_ROOT)
class PostPagesTests(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = User.objects.create_user(username='NoName')
cls.user_client = Client()
cls.user_client.force_login(cls.user)
small_gif = (
b'\x47\x49\x46\x38\x39\x61\x02\x00'
b'\x01\x00\x80\x00\x00\x00\x00\x00'
b'\xFF\xFF\xFF\x21\xF9\x04\x00\x00'
b'\x00\x00\x00\x2C\x00\x00\x00\x00'
b'\x02\x00\x01\x00\x00\x02\x02\x0C'
b'\x0A\x00\x3B'
)
image = SimpleUploadedFile(
name='small.gif',
content=small_gif,
content_type='image/gif'
)
cls.group = Group.objects.create(
            title='Test title',
            slug='test-slug',
            description='Description...'
)
cls.post = Post.objects.create(
            text='Test post text.',
image=image,
author=cls.user,
group=cls.group
)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
shutil.rmtree(TEMP_MEDIA_ROOT, ignore_errors=True)
def test_pages_uses_correct_template(self):
templates_pages_names = {
reverse('posts:index'): 'posts/index.html',
reverse(
'posts:group_list',
kwargs={'slug': PostPagesTests.group.slug}
): 'posts/group_list.html',
reverse(
'posts:profile',
kwargs={'username': PostPagesTests.user}
): 'posts/profile.html',
reverse(
'posts:post_detail',
kwargs={'post_id': PostPagesTests.post.id}
): 'posts/post_detail.html',
reverse('posts:post_create'): 'posts/create_post.html',
reverse(
'posts:post_edit',
kwargs={'post_id': PostPagesTests.post.id}
): 'posts/create_post.html'
}
for reverse_name, template in templates_pages_names.items():
with self.subTest(reverse_name=reverse_name):
response = PostPagesTests.user_client.get(reverse_name)
self.assertTemplateUsed(response, template)
def check_post_context(self, post):
self.assertEqual(post.id, PostPagesTests.post.id)
self.assertEqual(post.author, PostPagesTests.post.author)
self.assertEqual(post.text, PostPagesTests.post.text)
self.assertEqual(post.image, PostPagesTests.post.image)
self.assertEqual(post.group, PostPagesTests.post.group)
def test_index_page_context(self):
response = PostPagesTests.user_client.get(reverse('posts:index'))
self.check_post_context(response.context['page_obj'][0])
def test_group_list_page_context(self):
response = PostPagesTests.user_client.get(
reverse(
'posts:group_list',
kwargs={'slug': PostPagesTests.group.slug}
)
)
self.check_post_context(response.context['page_obj'][0])
self.assertEqual(
PostPagesTests.group,
response.context['group']
)
def test_new_group_list_none(self):
group = Group.objects.create(
            title='Test title',
            slug='test-slug-new',
            description='Description...'
)
response = PostPagesTests.user_client.get(
reverse(
'posts:group_list',
kwargs={'slug': group.slug}
)
)
self.assertEqual(len(response.context['page_obj']), 0)
def test_profile_page_context(self):
response = PostPagesTests.user_client.get(
reverse(
'posts:profile',
kwargs={'username': PostPagesTests.user}
)
)
self.check_post_context(response.context['page_obj'][0])
self.assertEqual(
PostPagesTests.user,
response.context['author']
)
self.assertIsNotNone(response.context['following'])
def test_post_detail_page_context(self):
response = PostPagesTests.user_client.get(
reverse(
'posts:post_detail',
kwargs={'post_id': PostPagesTests.post.id}
)
)
self.check_post_context(response.context['post'])
form_fields = {'text': forms.fields.CharField}
for value, expected in form_fields.items():
with self.subTest(value=value):
form_fields = response.context['form'].fields.get(value)
self.assertIsInstance(form_fields, expected)
self.assertIsNotNone(response.context['comments'])
def test_edit_post_page_context(self):
response = PostPagesTests.user_client.get(
reverse(
'posts:post_edit',
kwargs={'post_id': PostPagesTests.post.id}
)
)
self.assertIsNotNone(response.context['form'])
self.assertIsNotNone(response.context['is_edit'])
self.assertTrue(response.context['is_edit'])
form_fields = {
'text': forms.fields.CharField,
'group': forms.fields.ChoiceField,
}
for value, expected in form_fields.items():
with self.subTest(value=value):
form_fields = response.context['form'].fields.get(value)
self.assertIsInstance(form_fields, expected)
def test_create_post_page_context(self):
response = PostPagesTests.user_client.get(
reverse('posts:post_create')
)
form_fields = {
'text': forms.fields.CharField,
'group': forms.fields.ChoiceField,
}
for value, expected in form_fields.items():
with self.subTest(value=value):
form_fields = response.context['form'].fields.get(value)
self.assertIsInstance(form_fields, expected)
def test_cache_index(self):
post = Post.objects.create(
            text='Test post text.',
author=PostPagesTests.user,
)
response = PostPagesTests.user_client.get(
reverse('posts:index')
)
page = response.content
post.delete()
response = PostPagesTests.user_client.get(
reverse('posts:index')
)
self.assertEqual(page, response.content)
cache.clear()
response = PostPagesTests.user_client.get(
reverse('posts:index')
)
self.assertNotEqual(page, response.content)
class FollowTests(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = User.objects.create_user(username='NoName')
cls.user_client = Client()
cls.user_client.force_login(cls.user)
cls.author = User.objects.create_user(username='Author')
cls.author_client = Client()
cls.author_client.force_login(cls.author)
def test_following_auth(self):
        # FIXME: create the follow relation through the ORM instead of via the view
FollowTests.user_client.get(
reverse(
'posts:profile_follow',
kwargs={'username': FollowTests.author}
)
)
        # FIXME: should check that the DB grew by exactly one Follow object
follow = Follow.objects.last()
self.assertEqual(follow.user, FollowTests.user)
self.assertEqual(follow.author, FollowTests.author)
def test_unfollow_auth(self):
        # FIXME: create the follow relation through the ORM instead of via the view
FollowTests.user_client.get(
reverse(
'posts:profile_follow',
kwargs={'username': FollowTests.author}
)
)
follows_count = Follow.objects.count()
FollowTests.user_client.get(
reverse(
'posts:profile_unfollow',
kwargs={'username': FollowTests.author}
)
)
self.assertEqual(Follow.objects.count(), follows_count - 1)
def test_new_post_follow(self):
FollowTests.user_client.get(
reverse(
'posts:profile_follow',
kwargs={'username': FollowTests.author}
)
)
post = Post.objects.create(
            text='Test post text.',
author=FollowTests.author,
)
response = FollowTests.user_client.get(
reverse(
'posts:follow_index'
)
)
self.assertEqual(
post.id, response.context['page_obj'][0].id
)
def test_new_post_unfollow(self):
FollowTests.user_client.get(
reverse(
'posts:profile_follow',
kwargs={'username': FollowTests.author}
)
)
Post.objects.create(
            text='Test post text.',
author=FollowTests.author,
)
user = User.objects.create_user(username='NameNo')
user_client = Client()
user_client.force_login(user)
response = user_client.get(
reverse(
'posts:follow_index'
)
)
self.assertEqual(len(response.context['page_obj']), 0)
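
    # A hedged sketch addressing the FIXME notes above: a count-based check
    # that following creates exactly one Follow row (uses the same models and
    # clients as the tests in this class).
    def test_follow_count_increases_by_one(self):
        follows_count = Follow.objects.count()
        FollowTests.user_client.get(
            reverse(
                'posts:profile_follow',
                kwargs={'username': FollowTests.author}
            )
        )
        self.assertEqual(Follow.objects.count(), follows_count + 1)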
|
from sqlalchemy import Column
from sqlalchemy import Integer
from alembic import command
from alembic import op
from alembic.testing import config
from alembic.testing.env import _no_sql_testing_config
from alembic.testing.env import clear_staging_env
from alembic.testing.env import staging_env
from alembic.testing.env import three_rev_fixture
from alembic.testing.fixtures import capture_context_buffer
from alembic.testing.fixtures import op_fixture
from alembic.testing.fixtures import TestBase
from alembic.util import sqla_compat
class FullEnvironmentTests(TestBase):
@classmethod
def setup_class(cls):
staging_env()
cls.cfg = cfg = _no_sql_testing_config("oracle")
cls.a, cls.b, cls.c = three_rev_fixture(cfg)
@classmethod
def teardown_class(cls):
clear_staging_env()
    def test_begin_commit(self):
with capture_context_buffer(transactional_ddl=True) as buf:
command.upgrade(self.cfg, self.a, sql=True)
assert "SET TRANSACTION READ WRITE\n\n/" in buf.getvalue()
assert "COMMIT\n\n/" in buf.getvalue()
def test_batch_separator_default(self):
with capture_context_buffer() as buf:
command.upgrade(self.cfg, self.a, sql=True)
assert "/" in buf.getvalue()
assert ";" not in buf.getvalue()
def test_batch_separator_custom(self):
with capture_context_buffer(oracle_batch_separator="BYE") as buf:
command.upgrade(self.cfg, self.a, sql=True)
assert "BYE" in buf.getvalue()
class OpTest(TestBase):
def test_add_column(self):
context = op_fixture("oracle")
op.add_column("t1", Column("c1", Integer, nullable=False))
context.assert_("ALTER TABLE t1 ADD c1 INTEGER NOT NULL")
def test_add_column_with_default(self):
context = op_fixture("oracle")
op.add_column(
"t1", Column("c1", Integer, nullable=False, server_default="12")
)
context.assert_("ALTER TABLE t1 ADD c1 INTEGER DEFAULT '12' NOT NULL")
@config.requirements.comments
def test_add_column_with_comment(self):
context = op_fixture("oracle")
op.add_column(
"t1", Column("c1", Integer, nullable=False, comment="c1 comment")
)
context.assert_(
"ALTER TABLE t1 ADD c1 INTEGER NOT NULL",
"COMMENT ON COLUMN t1.c1 IS 'c1 comment'",
)
@config.requirements.computed_columns_api
def test_add_column_computed(self):
context = op_fixture("oracle")
op.add_column(
"t1",
Column("some_column", Integer, sqla_compat.Computed("foo * 5")),
)
context.assert_(
"ALTER TABLE t1 ADD some_column "
"INTEGER GENERATED ALWAYS AS (foo * 5)"
)
def test_alter_table_rename_oracle(self):
context = op_fixture("oracle")
op.rename_table("s", "t")
context.assert_("ALTER TABLE s RENAME TO t")
def test_alter_table_rename_schema_oracle(self):
context = op_fixture("oracle")
op.rename_table("s", "t", schema="myowner")
context.assert_("ALTER TABLE myowner.s RENAME TO t")
def test_alter_column_rename_oracle(self):
context = op_fixture("oracle")
op.alter_column("t", "c", new_column_name="x")
context.assert_("ALTER TABLE t RENAME COLUMN c TO x")
def test_alter_column_new_type(self):
context = op_fixture("oracle")
op.alter_column("t", "c", type_=Integer)
context.assert_("ALTER TABLE t MODIFY c INTEGER")
def test_alter_column_add_comment(self):
context = op_fixture("oracle")
op.alter_column("t", "c", type_=Integer, comment="c comment")
context.assert_(
"ALTER TABLE t MODIFY c INTEGER",
"COMMENT ON COLUMN t.c IS 'c comment'",
)
def test_alter_column_add_comment_quotes(self):
context = op_fixture("oracle")
op.alter_column("t", "c", type_=Integer, comment="c 'comment'")
context.assert_(
"ALTER TABLE t MODIFY c INTEGER",
"COMMENT ON COLUMN t.c IS 'c ''comment'''",
)
def test_alter_column_drop_comment(self):
context = op_fixture("oracle")
op.alter_column("t", "c", type_=Integer, comment=None)
context.assert_(
"ALTER TABLE t MODIFY c INTEGER", "COMMENT ON COLUMN t.c IS ''"
)
def test_create_table_comment(self):
# this is handled by SQLAlchemy's compilers
context = op_fixture("oracle")
op.create_table_comment("t2", comment="t2 table", schema="foo")
context.assert_("COMMENT ON TABLE foo.t2 IS 't2 table'")
def test_drop_table_comment(self):
# this is handled by SQLAlchemy's compilers
context = op_fixture("oracle")
op.drop_table_comment("t2", existing_comment="t2 table", schema="foo")
context.assert_("COMMENT ON TABLE foo.t2 IS ''")
def test_drop_index(self):
context = op_fixture("oracle")
op.drop_index("my_idx", "my_table")
context.assert_contains("DROP INDEX my_idx")
def test_drop_column_w_default(self):
context = op_fixture("oracle")
op.drop_column("t1", "c1")
context.assert_("ALTER TABLE t1 DROP COLUMN c1")
def test_drop_column_w_check(self):
context = op_fixture("oracle")
op.drop_column("t1", "c1")
context.assert_("ALTER TABLE t1 DROP COLUMN c1")
def test_alter_column_nullable_w_existing_type(self):
context = op_fixture("oracle")
op.alter_column("t", "c", nullable=True, existing_type=Integer)
context.assert_("ALTER TABLE t MODIFY c NULL")
def test_alter_column_not_nullable_w_existing_type(self):
context = op_fixture("oracle")
op.alter_column("t", "c", nullable=False, existing_type=Integer)
context.assert_("ALTER TABLE t MODIFY c NOT NULL")
def test_alter_column_nullable_w_new_type(self):
context = op_fixture("oracle")
op.alter_column("t", "c", nullable=True, type_=Integer)
context.assert_(
"ALTER TABLE t MODIFY c NULL", "ALTER TABLE t MODIFY c INTEGER"
)
def test_alter_column_not_nullable_w_new_type(self):
context = op_fixture("oracle")
op.alter_column("t", "c", nullable=False, type_=Integer)
context.assert_(
"ALTER TABLE t MODIFY c NOT NULL", "ALTER TABLE t MODIFY c INTEGER"
)
def test_alter_add_server_default(self):
context = op_fixture("oracle")
op.alter_column("t", "c", server_default="5")
context.assert_("ALTER TABLE t MODIFY c DEFAULT '5'")
def test_alter_replace_server_default(self):
context = op_fixture("oracle")
op.alter_column(
"t", "c", server_default="5", existing_server_default="6"
)
context.assert_("ALTER TABLE t MODIFY c DEFAULT '5'")
def test_alter_remove_server_default(self):
context = op_fixture("oracle")
op.alter_column("t", "c", server_default=None)
context.assert_("ALTER TABLE t MODIFY c DEFAULT NULL")
def test_alter_do_everything(self):
context = op_fixture("oracle")
op.alter_column(
"t",
"c",
new_column_name="c2",
nullable=True,
type_=Integer,
server_default="5",
)
context.assert_(
"ALTER TABLE t MODIFY c NULL",
"ALTER TABLE t MODIFY c DEFAULT '5'",
"ALTER TABLE t MODIFY c INTEGER",
"ALTER TABLE t RENAME COLUMN c TO c2",
)
@config.requirements.comments
def test_create_table_with_column_comments(self):
context = op_fixture("oracle")
op.create_table(
"t2", Column("c1", Integer, primary_key=True), comment="t2 comment"
)
context.assert_(
"CREATE TABLE t2 (c1 INTEGER NOT NULL, PRIMARY KEY (c1))",
"COMMENT ON TABLE t2 IS 't2 comment'",
)
# TODO: when we add schema support
# def test_alter_column_rename_oracle_schema(self):
# context = op_fixture('oracle')
# op.alter_column("t", "c", name="x", schema="y")
# context.assert_(
# 'ALTER TABLE y.t RENAME COLUMN c TO c2'
# )
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for shell.bzl."""
load("//lib:shell.bzl", "shell")
load("//lib:unittest.bzl", "asserts", "unittest")
def _shell_array_literal_test(ctx):
"""Unit tests for shell.array_literal."""
env = unittest.begin(ctx)
asserts.equals(env, "()", shell.array_literal([]))
asserts.equals(env, "('1')", shell.array_literal([1]))
asserts.equals(env, "('1' '2' '3')", shell.array_literal([1, 2, 3]))
asserts.equals(env, "('$foo')", shell.array_literal(["$foo"]))
asserts.equals(env, "('qu\"o\"te')", shell.array_literal(['qu"o"te']))
unittest.end(env)
shell_array_literal_test = unittest.make(_shell_array_literal_test)
def _shell_quote_test(ctx):
"""Unit tests for shell.quote."""
env = unittest.begin(ctx)
asserts.equals(env, "'foo'", shell.quote("foo"))
asserts.equals(env, "'foo bar'", shell.quote("foo bar"))
asserts.equals(env, "'three spaces'", shell.quote("three spaces"))
asserts.equals(env, "' leading'", shell.quote(" leading"))
asserts.equals(env, "'trailing '", shell.quote("trailing "))
asserts.equals(env, "'new\nline'", shell.quote("new\nline"))
asserts.equals(env, "'tab\tcharacter'", shell.quote("tab\tcharacter"))
asserts.equals(env, "'$foo'", shell.quote("$foo"))
asserts.equals(env, "'qu\"o\"te'", shell.quote('qu"o"te'))
asserts.equals(env, "'it'\\''s'", shell.quote("it's"))
asserts.equals(env, "'foo\\bar'", shell.quote("foo\\bar"))
asserts.equals(env, "'back`echo q`uote'", shell.quote("back`echo q`uote"))
unittest.end(env)
shell_quote_test = unittest.make(_shell_quote_test)
def _shell_spawn_e2e_test_impl(ctx):
"""Test spawning a real shell."""
args = [
"foo",
"foo bar",
"three spaces",
" leading",
"trailing ",
"new\nline",
"tab\tcharacter",
"$foo",
'qu"o"te',
"it's",
"foo\\bar",
"back`echo q`uote",
]
script_content = "\n".join([
"#!/bin/bash",
"myarray=" + shell.array_literal(args),
'output=$(echo "${myarray[@]}")',
# For logging:
'echo "DEBUG: output=[${output}]" >&2',
# The following is a shell representation of what the echo of the quoted
# array will look like. It looks a bit confusing considering it's shell
        # quoted into Python. Shell quoting uses single quotes to minimize
        # escaping, so only the single quote needs to be escaped as '\''; all
        # other characters are kept literally.
"expected='foo foo bar three spaces leading trailing new",
"line tab\tcharacter $foo qu\"o\"te it'\\''s foo\\bar back`echo q`uote'",
'[[ "${output}" == "${expected}" ]]',
])
script_file = ctx.actions.declare_file("%s.sh" % (ctx.label.name))
ctx.actions.write(
output = script_file,
content = script_content,
is_executable = True,
)
return [
DefaultInfo(executable = script_file),
]
shell_spawn_e2e_test = rule(
test = True,
implementation = _shell_spawn_e2e_test_impl,
)
def shell_test_suite():
"""Creates the test targets and test suite for shell.bzl tests."""
unittest.suite(
"shell_tests",
shell_array_literal_test,
shell_quote_test,
shell_spawn_e2e_test,
)
|
import strax
import straxen
import wfsim
def test_nt_context(register=None, context=None):
"""
Test a context if it is properly setup. To this end, we perform a
simple scan of the field "time" since all plugins should have this
field, if there is some option specified badly, we will quickly find
out since for this scan, we activate all the plugins.
:param register: Register a plugin (optional)
:param context: Test with some other context than the nT simulation
context
"""
if not straxen.utilix_is_configured():
return
if context is None:
context = straxen.contexts.xenonnt_simulation(cmt_run_id_sim='010000', cmt_version='global_ONLINE')
assert isinstance(context, strax.Context), f'{context} is not a context'
if register is not None:
assert issubclass(register, strax.Plugin), f'{register} is not a plugin'
# Search all plugins for the time field (each should have one)
context.search_field('time')
# def test_mc_chain():
# test_nt_context(wfsim.RawRecordsFromMcChain)
# def test_fax_nveto():
# test_nt_context(wfsim.RawRecordsFromFaxnVeto)
# def test_1t_context():
# test_nt_context(context=straxen.contexts.xenon1t_simulation())
|
import glob
import datetime
import inference
import numpy as np
flist = []
def run_classifier():
flist = []
list1 = glob.glob("./images/*.jpg")
list1.sort()
print("Printing the time of Interesting Events.....\n\n")
temp = str(inference.run_inference_on_image())
for i in range(len(list1) - 1):
inference.imagePath = list1[i]
temp2 = str(inference.run_inference_on_image2())
inference.imagePath = list1[i+1]
temp = str(inference.run_inference_on_image2())
if temp2 != temp:
print("Time : " + str(datetime.timedelta(seconds=(i))))
flist.extend([i])
else:
print("." ,)
d = np.array(flist)
d.sort()
diff = [y - x for x, y in zip(*[iter(d)] * 2)]
avg = sum(diff) / len(diff)
m = [[d[0]]]
for x in d[1:]:
if x - m[-1][0] < avg:
m[-1].append(x)
else:
m.append([x])
# print(m)
# print(type(m))
    with open('list.txt', 'w') as f:
        print("Writing to file\n")
        for i in range(len(m)):
            print("\n", file=f)
            print("start time : " + str(datetime.timedelta(seconds=int(m[i][0]))), file=f)
            print("end time : " + str(datetime.timedelta(seconds=int(m[i][-1]))), file=f)
print("\n\nFinished Analysis\n\n")
print("The timestanps of all interesting events is stored in a File named list.txt ")
if __name__ == '__main__':
run_classifier()
|
import re
import sys
import inspect
_py2 = sys.version_info[0] == 2
_py3 = sys.version_info[0] == 3
# noinspection PyPep8Naming
class route(object):
def __init__(self, rule, **options):
"""
Class Initializer - This will only execute if using BottleCBV's original route() style.
"""
# Not sure if this is needed, need to test what happens when you specify a rule but not options in BottleCBV.
if not options:
options = dict(method='ANY')
self.rule = rule
self.options = options
def __call__(self, func):
f = func
rule = self.rule
options = self.options
def decorator(*_, **__):
if not hasattr(f, '_rule_cache') or f._rule_cache is None:
f._rule_cache = {f.__name__: [(rule, options)]}
elif f.__name__ not in f._rule_cache:
f._rule_cache[f.__name__] = [(rule, options)]
else:
f._rule_cache[f.__name__].append((rule, options))
return f
return decorator()
@staticmethod
def decorate(f, rule, **options):
if not hasattr(f, '_rule_cache') or f._rule_cache is None:
f._rule_cache = {f.__name__: [(rule, options)]}
elif f.__name__ not in f._rule_cache:
f._rule_cache[f.__name__] = [(rule, options)]
else:
f._rule_cache[f.__name__].append((rule, options))
return f
@staticmethod
def get(rule):
"""
GET Method
CRUD Use Case: Read
Example:
Request a user profile
"""
options = dict(method='GET')
def decorator(f):
return route.decorate(f, rule, **options)
return decorator
@staticmethod
def post(rule):
"""
POST Method
CRUD Use Case: Create
Example:
Create a new user
"""
options = dict(method='POST')
def decorator(f):
return route.decorate(f, rule, **options)
return decorator
@staticmethod
def put(rule):
"""
PUT Method
CRUD Use Case: Update / Replace
Example:
Set item# 4022 to Red Seedless Grapes, instead of tomatoes
"""
options = dict(method='PUT')
def decorator(f):
return route.decorate(f, rule, **options)
return decorator
@staticmethod
def patch(rule):
"""
PATCH Method
CRUD Use Case: Update / Modify
Example:
Rename then user's name from Jon to John
"""
options = dict(method='PATCH')
def decorator(f):
return route.decorate(f, rule, **options)
return decorator
@staticmethod
def delete(rule):
"""
DELETE Method
CRUD Use Case: Delete
Example:
Delete user# 12403 (John)
"""
options = dict(method='DELETE')
def decorator(f):
return route.decorate(f, rule, **options)
return decorator
@staticmethod
def head(rule):
"""
HEAD Method
CRUD Use Case: Read (in-part)
Note: This is the same as GET, but without the response body.
This is useful for items such as checking if a user exists, such as this example:
Request: GET /user/12403
Response: (status code) 404 - Not Found
        If you are closely following the REST standard, you can also verify if the requested PUT (update) was
        successfully applied, in this example:
Request: PUT /user/12404 { "name": "John"}
Response: (status code) 304 - Not Modified
"""
options = dict(method='HEAD')
def decorator(f):
return route.decorate(f, rule, **options)
return decorator
@staticmethod
def any(rule):
"""
From the Bottle Documentation:
The non-standard ANY method works as a low priority fallback: Routes that listen to ANY will match requests
regardless of their HTTP method but only if no other more specific route is defined. This is helpful for
proxy-routes that redirect requests to more specific sub-applications.
"""
options = dict(method='ANY')
def decorator(f):
return route.decorate(f, rule, **options)
return decorator
class BottleView(object):
""" Class based view implementation for bottle (following flask-classy architech)
"""
decorators = []
DEFAULT_ROUTES = ["get", "put", "post", "delete", "index", "options"]
base_route = None
route_prefix = None
view_identifier = "view"
@classmethod
def register(cls, app, base_route=None, route_prefix=None):
""" Register all the possible routes of the subclass
:param app: bottle app instance
:param base_route: prepend to the route rule (/base_route/<class_name OR route_prefix>)
:param route_prefix: used when want to register custom rule, which is not class name
"""
if cls is BottleView:
raise TypeError("cls must be a subclass of BottleView, not BottleView itself")
cls._app = app
cls.route_prefix = route_prefix or cls.route_prefix
cls.base_route = base_route or cls.base_route
# import ipdb; ipdb.set_trace()
# get all the valid members of the class to register Endpoints
routes = cls._get_interesting_members(BottleView)
# initialize the class
klass = cls()
# Iterate through class members to register Endpoints
for func_name, func in routes:
# print "*"*50
method_args = inspect.getargspec(func)[0]
# Get
rule = cls._build_route_rule(func_name, *method_args)
method = "GET"
if func_name in cls.DEFAULT_ROUTES:
if func_name == "index":
method = "GET"
else:
method = func_name.upper()
# create name for endpoint
endpoint = "%s:%s" % (cls.__name__, func_name)
callable_method = getattr(klass, func_name)
for decorator in cls.decorators:
callable_method = decorator(callable_method)
try:
# noinspection PyProtectedMember
custom_rule = func._rule_cache
except AttributeError:
method_args = inspect.getargspec(func)[0]
rule = cls._build_route_rule(func_name, *method_args)
method = "GET"
if func_name in cls.DEFAULT_ROUTES:
if func_name == "index":
method = "GET"
else:
method = func_name.upper()
cls._app.route(callback=callable_method, method=method,
path=rule, name=endpoint)
else:
custom_rule_list = list(custom_rule.values())
if _py3:
custom_rule_list = list(custom_rule_list)
for cached_rule in custom_rule_list[0]:
rule, options = cached_rule
try:
method = options.pop("method")
except KeyError:
method = "GET"
try:
endpoint = options.pop("name")
except KeyError:
pass
cls._app.route(callback=callable_method, path=rule,
method=method, name=endpoint, **options)
print(("%s : %s, Endpoint: %s" % (method, rule, endpoint)))
@classmethod
def _build_route_rule(cls, func_name, *method_args):
klass_name = cls.__name__.lower()
klass_name = (klass_name[:-len(cls.view_identifier)]
if klass_name.endswith(cls.view_identifier)
else klass_name)
rule = klass_name
if not (cls.base_route or cls.route_prefix):
rule = klass_name
elif not cls.base_route and cls.route_prefix:
rule = cls.route_prefix
elif cls.base_route and not cls.route_prefix:
rule = "%s/%s" % (cls.base_route, klass_name)
elif cls.base_route and cls.route_prefix:
rule = "%s/%s" % (cls.base_route, cls.route_prefix)
rule_parts = [rule]
if func_name not in cls.DEFAULT_ROUTES:
rule_parts.append(func_name.replace("_", "-").lower())
ignored_rule_args = ['self']
if hasattr(cls, 'base_args'):
ignored_rule_args += cls.base_args
for arg in method_args:
if arg not in ignored_rule_args:
rule_parts.append("<%s>" % arg)
result = "/%s/" % join_paths(*rule_parts)
        result = re.sub(r'/{2,}', '/', result)  # collapse duplicate slashes
return result
@classmethod
def _get_interesting_members(cls, base_class):
"""Returns a list of methods that can be routed to"""
base_members = dir(base_class)
predicate = inspect.ismethod if _py2 else inspect.isfunction
all_members = inspect.getmembers(cls, predicate=predicate)
return [member for member in all_members
if not member[0] in base_members
and ((hasattr(member[1], "__self__")
and not member[1].__self__ in cls.__class__.__mro__) if _py2 else True)
and not member[0].startswith("_")]
def join_paths(*path_pieces):
"""Join parts of a url path"""
# Remove blank strings, and make sure everything is a string
cleaned_parts = list(map(str, [_f for _f in path_pieces if _f]))
if _py3:
cleaned_parts = list(cleaned_parts)
return "/".join(cleaned_parts)
|
#!/usr/bin/python
#
# Logic for computing missing and duplicate files using Postgresql DB.
#
import os
import fnmatch
# from shutil import copy2
# from shutil import copyfile
from dbutils import required as db
MISSING_FROM_COMP_FOLDER = "missing_from_comp"
MISSING_FROM_SRC_FOLDER = "missing_from_source"
FILE_NAME_COLUMN = "file"
FILE_EXTENSIONS_TO_SKIP = ['.ini', '.db', '.info', '.pdfcp ']
def handleDuplicates(conn):
# conn = db.getConnection(dbName)
    cur = conn.cursor()
try:
# duplicates on the source location
cur.execute("SELECT * from filehashes WHERE ")
# duplicates on the comparison location
cur.execute("SELECT * from filehashes ")
finally:
        cur.close()
# conn.close
db.returnConnection(conn)
#######################
# Logic for identifying missing files in the source and destination/comparison locations.
#######################
def handleMissing(dbName, sourceSchema, compSchema):
conn = db.getConnection(dbName)
cur = conn.cursor()
try:
# identify missing files from the comparison location
command = "SELECT %s from %s.filehashes where hash in (select hash from %s.filehashes except select hash from %s.filehashes);" %(FILE_NAME_COLUMN, sourceSchema, sourceSchema, compSchema)
print("Identifying all missing files in comparison location. Executing: " + command)
cur.execute( command )
missingInComp = cur.fetchall()
# print("Missing files in comparison location: %s" % missingInComp)
# __collectMissingFiles__(list(missingInComp), MISSING_FROM_COMP_FOLDER)
try:
for missingFile in missingInComp:
__collectMissingFiles__(missingFile[0], MISSING_FROM_COMP_FOLDER)
except Exception as ce:
print("There was a problem locating files on comparison location. The comparison location files may have changed/moved since the scan. ")
print(ce)
return
# identify missing files from the source location
command = "SELECT %s from %s.filehashes where hash in (select hash from %s.filehashes except select hash from %s.filehashes);" % (FILE_NAME_COLUMN, compSchema, compSchema, sourceSchema)
print("Identifying all missing files in source location. Executing: " + command)
cur.execute(command)
missingInSource = cur.fetchall()
# print("Missing files in source location: %s" % missingInSource)
# __collectMissingFiles__(list(missingInSource), MISSING_FROM_SRC_FOLDER)
try:
for missingFile in missingInSource:
__collectMissingFiles__(missingFile[0], MISSING_FROM_SRC_FOLDER)
except Exception as se:
print("There was a problem locating files on source location. The source location files may have changed/moved since the scan. ")
print(se)
except Exception as e:
print("Unable to identify missing files! Cause: " + e)
finally:
cur.close()
# conn.close
db.returnConnection(conn)
print("Done processing missing files. Please look at %s and %s folders for missing files." % (MISSING_FROM_SRC_FOLDER, MISSING_FROM_COMP_FOLDER))
#######################
# Utility for copying all of the missing files from the specified result-set into the specified folder.
#######################
def __collectMissingFiles__( missingFile, folderName ):
# for missingFile in missingFiles:
# missingFile.endswith(tuple(set(['.ini', '.db'])))
if not missingFile.endswith(tuple(FILE_EXTENSIONS_TO_SKIP)):
dst = "./" + folderName + missingFile
print("Attempting to copy missing file: " + missingFile + " to destination: " + dst)
if not os.path.exists(os.path.dirname(dst)):
os.makedirs(os.path.dirname(dst))
# TODO implement file type filtering to only get files we want and skip ones we don't care about like *.txt, *.ini, etc.
# if not fnmatch.fnmatch(missingFile, '.*.ini'):
# copyfile( missingFile, dst)
# copy2(missingFile, dst)
os.system('cp -v -p "' + missingFile + '" "' + dst + '"')
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class PagedGraphUsers(Model):
"""PagedGraphUsers.
:param continuation_token: This will be non-null if there is another page of data. There will never be more than one continuation token returned by a request.
:type continuation_token: list of str
:param graph_users: The enumerable set of users found within a page.
:type graph_users: list of :class:`GraphUser <graph.v4_1.models.GraphUser>`
"""
_attribute_map = {
'continuation_token': {'key': 'continuationToken', 'type': '[str]'},
'graph_users': {'key': 'graphUsers', 'type': '[GraphUser]'}
}
def __init__(self, continuation_token=None, graph_users=None):
super(PagedGraphUsers, self).__init__()
self.continuation_token = continuation_token
self.graph_users = graph_users
|
"""Decoder portion of the model."""
import torch
import torch.nn.functional as F
from torch import nn
def _build_base_decoder():
"""Builds the base decoder shared by all three decoder types."""
return nn.Sequential(nn.Conv2d(in_channels=1280, out_channels=256, kernel_size=(3, 3), stride=1, padding=1),
nn.BatchNorm2d(num_features=256), nn.ReLU())
class Decoders(nn.Module):
"""Module which contains all three decoders."""
def __init__(self, num_classes: int, enabled_tasks: (bool, bool, bool), output_size=(128, 256)):
super().__init__()
self._output_size = output_size
self._num_classes = num_classes
self._enabled_tasks = enabled_tasks
self._base_semseg = _build_base_decoder()
self._base_insseg = _build_base_decoder()
self._base_depth = _build_base_decoder()
kernel_size = (1, 1)
self._semsegcls = nn.Conv2d(256, self._num_classes, kernel_size)
self._inssegcls = nn.Conv2d(256, 2, kernel_size)
self._depthcls = nn.Conv2d(256, 1, kernel_size)
def set_output_size(self, size):
self._output_size = size
def forward(self, x):
"""Returns (sem seg, instance seg, depth)."""
# x: [batch x 1280 x H/8 x W/8]
sem_seg_enabled, inst_seg_enabled, depth_enabled = self._enabled_tasks
if sem_seg_enabled:
x1 = self._base_semseg(x)
x1 = self._semsegcls(x1)
x1 = F.interpolate(x1, size=self._output_size, mode='bilinear', align_corners=True)
else:
x1 = None
if inst_seg_enabled:
x2 = self._base_insseg(x)
x2 = self._inssegcls(x2)
x2 = F.interpolate(x2, size=self._output_size, mode='bilinear', align_corners=True)
else:
x2 = None
if depth_enabled:
x3 = self._base_depth(x)
x3 = self._depthcls(x3)
x3 = F.interpolate(x3, size=self._output_size, mode='bilinear', align_corners=True)
else:
x3 = None
return x1, x2, x3
if __name__ == '__main__':
# ### Shape test
output_size = (123, 432)
    model = Decoders(num_classes=20, enabled_tasks=(True, True, True), output_size=output_size)
test = torch.zeros(size=(2, 1280, 256, 256))
result = model.forward(test)
assert result[0].shape == (2, 20, *output_size), "output shape is {}".format(result[0].shape)
|
class Queue:
"""
    A class used to represent a Queue for storing processes
...
Attributes
----------
processList : list
The list which would store the process currently in line for execution
priority : str
The type of process (Foreground,Background etc)
SchedulingAlgo : object
The object for corresponding scheduling algorithm to be used in this queue (FCFS,SJF etc)
Methods
-------
    queue_enqueue(process)
        Uses the scheduling algorithm object to insert a process into the process list
    queue_dequeue(quantum)
        Uses the scheduling algorithm object to remove processes from the process list
size()
Returns the size of process list
isEmpty()
Checks whether the process list is empty or not
front()
Returns the first element in process list if its not empty.
"""
def __init__(self,type):
"""
Parameters
----------
type : str
The type of process (Foreground,Background etc)
"""
self.processList = []
self.priority = type
self.SchedulingAlgo = None
def size(self):
"""Returs the total number of processes left for execution.
Returns
-------
int :
The length of process list
"""
return len(self.processList)
def isEmpty(self):
"""Checks whether all processes have been executed or not.
Returns
-------
bool :
True if processList has 0 length , False otherwise.
"""
return self.size() == 0
def front(self):
"""Returns the first process in list
Returns
-------
object :
The first process if process list is not empty else None
"""
        return self.processList[0] if not self.isEmpty() else None
def queue_enqueue(self,process):
"""Uses the queue's scheduling algorithm enqueue method to insert
the new process into the process list.
Parameters
----------
process : object
The Process class object defining the new process to insert.
"""
self.SchedulingAlgo.enqueue(self.processList,process)
def queue_dequeue(self,quantum):
"""Takes the quantum time elapsed and removes processes from the
        process list using the scheduling algorithm's dequeue method.
Parameters
----------
quantum : int
The time elapsed
Returns
-------
removed : list
            A list of processes removed during the current cycle
quantum : int
Quantum time updated after being used by each process (Usually 0)
"""
removed,quantum = self.SchedulingAlgo.dequeue(self.processList,quantum)
return removed,quantum
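
# ---------------------------------------------------------------------------
# A minimal usage sketch. The FCFS scheduler below is hypothetical (the real
# scheduling classes live elsewhere); it only illustrates the enqueue/dequeue
# contract the Queue expects from its SchedulingAlgo object.
class _SketchFCFS:
    def enqueue(self, pList, process):
        pList.append(process)

    def dequeue(self, pList, quantum):
        removed = []
        if pList:
            removed.append(pList.pop(0))
        return removed, 0

if __name__ == '__main__':
    q = Queue('Foreground')
    q.SchedulingAlgo = _SketchFCFS()
    q.queue_enqueue('P1')
    q.queue_enqueue('P2')
    removed, quantum = q.queue_dequeue(quantum=4)
    print(removed, q.size())  # ['P1'] 1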
|
import numpy as np
import editdistance
class Decoder():
def __init__(self, vocab):
self.vocab_list = [char for char in vocab]
def predict(self, batch_size, logits, y, lengths, y_lengths, n_show=5):
decoded = self.decode(logits, lengths)
cursor = 0
gt = []
n = min(n_show, logits.size(1))
samples = []
for b in range(batch_size):
y_str = ''.join([self.vocab_list[ch] for ch in y[cursor: cursor + y_lengths[b]]])
gt.append(y_str)
cursor += y_lengths[b]
if b < n:
samples.append([y_str, decoded[b]])
return decoded, gt, samples
def decode(self, logits, seq_lens):
raise NotImplementedError
def wer(self, s1, s2):
s1_words, s2_words = s1.split(), s2.split()
distance = editdistance.eval(s1_words, s2_words)
return distance / max(len(s1_words), len(s2_words))
def cer(self, s1, s2):
s1, s2 = s1.replace(' ', ''), s2.replace(' ', '')
distance = editdistance.eval(s1, s2)
return distance / max(len(s1), len(s2))
def cer_batch(self, decoded, gt):
return self.compare_batch(decoded, gt, self.cer)
def wer_batch(self, decoded, gt):
return self.compare_batch(decoded, gt, self.wer)
def compare_batch(self, decoded, gt, func):
assert len(decoded) == len(gt), f'batch size mismatch: {len(decoded)}!={len(gt)}'
results = []
for i, batch in enumerate(decoded):
for sentence in range(len(batch)):
error = func(decoded[i][sentence], gt[i])
results.append(error)
return np.mean(results)
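
if __name__ == '__main__':
    # A quick usage sketch of the edit-distance metrics above, with values
    # worked out by hand: 1 of 3 words differ, and 'kitten' -> 'sitting' is
    # the classic 3-edit example.
    dec = Decoder(vocab='abcdefghijklmnopqrstuvwxyz ')
    print(dec.wer('the cat sat', 'the cat mat'))  # 1/3 ~ 0.333
    print(dec.cer('kitten', 'sitting'))           # 3/7 ~ 0.429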
|
# MIT License
#
# Copyright (c) 2018-2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A book with our finest spells
"""
import shutil
import subprocess
from pathlib import Path
from click.testing import CliRunner
from packit.cli.packit_base import packit_base
from packit.config import Config
from packit.utils import cwd, run_command
TESTS_DIR = Path(__file__).parent
DATA_DIR = TESTS_DIR / "data"
UPSTREAM = DATA_DIR / "upstream_git"
UPSTREAM_WITH_MUTLIPLE_SOURCES = DATA_DIR / "upstream_git_with_multiple_sources"
EMPTY_CHANGELOG = DATA_DIR / "empty_changelog"
DISTGIT = DATA_DIR / "dist_git"
UP_COCKPIT_OSTREE = DATA_DIR / "cockpit-ostree"
UP_OSBUILD = DATA_DIR / "osbuild"
UP_SNAPD = DATA_DIR / "snapd"
TARBALL_NAME = "beerware-0.1.0.tar.gz"
SOURCEGIT_UPSTREAM = DATA_DIR / "sourcegit" / "upstream"
SOURCEGIT_SOURCEGIT = DATA_DIR / "sourcegit" / "source_git"
DG_OGR = DATA_DIR / "dg-ogr"
SPECFILE = DATA_DIR / "upstream_git/beer.spec"
UPSTREAM_SPEC_NOT_IN_ROOT = DATA_DIR / "spec_not_in_root/upstream"
def git_set_user_email(directory):
subprocess.check_call(
["git", "config", "user.email", "test@example.com"], cwd=directory
)
subprocess.check_call(
["git", "config", "user.name", "Packit Test Suite"], cwd=directory
)
def get_test_config():
conf = Config()
conf._pagure_user_token = "test"
conf._github_token = "test"
return conf
def git_add_and_commit(directory, message):
subprocess.check_call(["git", "add", "."], cwd=directory)
subprocess.check_call(["git", "commit", "-m", message], cwd=directory)
def initiate_git_repo(
directory,
tag=None,
upstream_remote="https://lol.wat",
push=False,
copy_from: str = None,
remotes=None,
):
"""
Initiate a git repo for testing.
:param directory: path to the git repo
:param tag: if set, tag the latest commit with this tag
:param upstream_remote: name of the origin - upstream remote (not used when remotes are set)
:param remotes: provide list of tuples (name, remote_url)
:param push: push to the remote?
:param copy_from: source tree to copy to the newly created git repo
"""
if remotes is None:
remotes = [("origin", upstream_remote)]
if copy_from:
shutil.copytree(copy_from, directory)
subprocess.check_call(["git", "init", "."], cwd=directory)
Path(directory).joinpath("README").write_text("Best upstream project ever!")
git_set_user_email(directory)
subprocess.check_call(["git", "add", "."], cwd=directory)
subprocess.check_call(["git", "commit", "-m", "initial commit"], cwd=directory)
if tag:
subprocess.check_call(
["git", "tag", "-a", "-m", f"tag {tag}, tests", tag], cwd=directory
)
for name, url in remotes:
subprocess.check_call(["git", "remote", "add", name, url], cwd=directory)
if push:
subprocess.check_call(["git", "fetch", "origin"], cwd=directory)
# tox strips some env vars so your user gitconfig is not picked up
# hence we need to be very explicit with git commands here
subprocess.check_call(
["git", "push", "--tags", "-u", "origin", "master:master"], cwd=directory
)
def prepare_dist_git_repo(directory, push=True):
subprocess.check_call(["git", "branch", "f30"], cwd=directory)
if push:
subprocess.check_call(["git", "push", "-u", "origin", "f30:f30"], cwd=directory)
def call_packit(fnc=None, parameters=None, envs=None, working_dir=None):
working_dir = working_dir or "."
fnc = fnc or packit_base
runner = CliRunner()
envs = envs or {}
parameters = parameters or []
# catch exceptions enables debugger
with cwd(working_dir):
return runner.invoke(fnc, args=parameters, env=envs, catch_exceptions=False)
def build_srpm(path: Path):
run_command(["rpmbuild", "--rebuild", str(path)])
def can_a_module_be_imported(module_name):
""" can a module be imported? """
try:
__import__(module_name)
return True
except ImportError:
return False
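
if __name__ == "__main__":
    # A minimal usage sketch of the helpers above; the temporary path is
    # hypothetical, nothing is pushed, and git must be available on PATH.
    import tempfile

    repo = Path(tempfile.mkdtemp()) / "upstream"
    initiate_git_repo(repo, tag="0.1.0", copy_from=str(UPSTREAM), push=False)
    git_add_and_commit(repo, message="tweak the README")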
|
from django.db import models
from accounts.models import Account
from store.models import Product, Variation
# Create your models here.
class Payment(models.Model):
user = models.ForeignKey(Account, on_delete=models.CASCADE)
payment_id = models.CharField(max_length=100)
payment_method = models.CharField(max_length=100)
amount_paid = models.CharField(max_length=100)
status = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.payment_id
class Order(models.Model):
STATUS = (
('New', 'New'),
('Accepted', 'Accepted'),
('Completed', 'Completed'),
('Cancelled', 'Cancelled'),
)
user = models.ForeignKey(Account, on_delete=models.SET_NULL, null=True)
payment = models.ForeignKey(Payment, on_delete=models.SET_NULL, blank=True, null=True)
order_number = models.CharField(max_length=20)
first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
phone = models.CharField(max_length=15)
email = models.EmailField(max_length=50)
address_line_1 = models.CharField(max_length=50)
address_line_2 = models.CharField(max_length=50, blank=True)
country = models.CharField(max_length=50)
state = models.CharField(max_length=50)
city = models.CharField(max_length=50)
order_note = models.CharField(max_length=100, blank=True)
order_total = models.FloatField()
tax = models.FloatField()
status = models.CharField(max_length=10, choices=STATUS, default='New')
ip = models.CharField(blank=True, max_length=20)
is_ordered = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def full_name(self):
return f'{self.first_name} {self.last_name}'
def full_address(self):
return f'{self.address_line_1} {self.address_line_2}'
def __str__(self):
return self.first_name
class OrderProduct(models.Model):
order = models.ForeignKey(Order, on_delete=models.CASCADE)
payment = models.ForeignKey(Payment, on_delete=models.SET_NULL, blank=True, null=True)
user = models.ForeignKey(Account, on_delete=models.CASCADE)
product = models.ForeignKey(Product, on_delete=models.CASCADE)
variations = models.ManyToManyField(Variation, blank=True)
quantity = models.IntegerField()
product_price = models.FloatField()
ordered = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.product.product_name
|
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Clout9.settings.production")
app = Celery('Clout9')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
# app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
|
# blog/asgi.py
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blog.settings")
from django.conf import settings
django.setup()
from django.core.asgi import get_asgi_application
from channels.security.websocket import OriginValidator
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from django.urls import re_path
from app.website.consumers import ExampleConsumer
application = ProtocolTypeRouter(
{
# Django's ASGI application to handle traditional HTTP requests
"http": get_asgi_application(),
# WebSocket handler
"websocket": OriginValidator(AuthMiddlewareStack(
URLRouter(
[
re_path(r"^ws/example/$", ExampleConsumer.as_asgi()),
]
)
), settings.ALLOWED_HOSTS)
}
)
|
valor = 0
num = [[],[]]
for c in range (0,7):
    valor = int(input(f'Enter value #{c+1}: '))
if valor % 2 == 0:
num[0].append(valor)
else:
num[1].append(valor)
print('-='*30)
print(f'The even numbers were {num[0]}\nThe odd numbers were {num[1]}.')
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'dnsprovejs'
copyright = '2018, Makoto Inoue'
author = 'Makoto Inoue'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx_js', 'sphinx.ext.autodoc']
root_for_relative_js_paths = '../lib'
js_source_path = ['../lib', '../lib/oracle', '../lib/dns']
primary_domain = 'js'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'dnsprovejsdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'dnsprovejs.tex', 'dnsprovejs Documentation',
'Makoto Inoue', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'dnsprovejs', 'dnsprovejs Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'dnsprovejs', 'dnsprovejs Documentation',
author, 'dnsprovejs', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
|
import os
import time
from membound_cases import memory_bound_cases_list
from utils import (arch_name, datatime_with_format, dtype2str, dump2json,
geometric_mean, md_table_header, scaled_repeat_times,
size2str)
import taichi as ti
class MemoryBound:
suite_name = 'memorybound'
supported_archs = [ti.x64, ti.cuda]
test_cases = memory_bound_cases_list
test_dtype_list = [ti.i32, ti.i64, ti.f32, ti.f64]
test_dsize_list = [
(4**i) * 1024 # kibibytes(KiB) = 1024
for i in range(1, 10) # [4KB,16KB...256MB]
]
basic_repeat_times = 10
evaluator = [geometric_mean]
def __init__(self, arch):
self._arch = arch
self._cases_impl = []
for case in self.test_cases:
for dtype in self.test_dtype_list:
impl = CaseImpl(case, arch, dtype, self.test_dsize_list,
self.evaluator)
self._cases_impl.append(impl)
def run(self):
for case in self._cases_impl:
case.run()
def save_as_json(self, arch_dir='./'):
#folder of suite
suite_path = os.path.join(arch_dir, self.suite_name)
os.makedirs(suite_path)
#json files
self._save_suite_info_as_json(suite_path)
self._save_cases_info_as_json(suite_path)
def save_as_markdown(self, arch_dir='./'):
current_time = datatime_with_format()
commit_hash = ti.core.get_commit_hash() #[:8]
file_name = f'{self.suite_name}.md'
file_path = os.path.join(arch_dir, file_name)
with open(file_path, 'w') as f:
lines = [
f'commit_hash: {commit_hash}\n', f'datatime: {current_time}\n'
]
lines += self._get_markdown_lines()
for line in lines:
print(line, file=f)
def _save_suite_info_as_json(self, suite_path='./'):
info_dict = {
'cases': [func.__name__ for func in self.test_cases],
'dtype': [dtype2str(dtype) for dtype in self.test_dtype_list],
'dsize': [size for size in self.test_dsize_list],
'repeat': [
scaled_repeat_times(self._arch, size, self.basic_repeat_times)
for size in self.test_dsize_list
],
'evaluator': [func.__name__ for func in self.evaluator]
}
info_path = os.path.join(suite_path, '_info.json')
with open(info_path, 'w') as f:
print(dump2json(info_dict), file=f)
def _save_cases_info_as_json(self, suite_path='./'):
        for case in self.test_cases:  # e.g. [fill, saxpy, reduction]
            results_dict = {}
            for impl in self._cases_impl:  # gather results for each dtype: [ti.i32, ti.i64, ti.f32, ti.f64]
if impl._name != case.__name__:
continue
result_name = dtype2str(impl._test_dtype)
results_dict[result_name] = impl.get_results_dict()
case_path = os.path.join(suite_path, (case.__name__ + '.json'))
with open(case_path, 'w') as f:
case_str = dump2json(results_dict)
print(case_str, file=f)
def _get_markdown_lines(self):
lines = []
lines += md_table_header(self.suite_name, self._arch,
self.test_dsize_list, self.basic_repeat_times,
self.evaluator)
result_header = '|kernel elapsed time(ms)' + ''.join(
'|' for i in range(
len(self.test_dsize_list) + len(MemoryBound.evaluator)))
lines += [result_header]
for case in self._cases_impl:
lines += case.get_markdown_lines()
lines.append('')
return lines
class CaseImpl:
def __init__(self, func, arch, test_dtype, test_dsize_list, evaluator):
self._func = func
self._name = func.__name__
self._arch = arch
self._test_dtype = test_dtype
self._test_dsize_list = test_dsize_list
self._min_time_in_us = [] #test results
self._evaluator = evaluator
def run(self):
ti.init(kernel_profiler=True, arch=self._arch)
print("TestCase[%s.%s.%s]" % (self._func.__name__, arch_name(
self._arch), dtype2str(self._test_dtype)))
for test_dsize in self._test_dsize_list:
print("test_dsize = %s" % (size2str(test_dsize)))
self._min_time_in_us.append(
self._func(self._arch, self._test_dtype, test_dsize,
MemoryBound.basic_repeat_times))
time.sleep(0.2)
ti.reset()
def get_markdown_lines(self):
string = '|' + self._name + '.' + dtype2str(self._test_dtype) + '|'
string += ''.join(
str(round(time, 4)) + '|' for time in self._min_time_in_us)
string += ''.join(
str(round(item(self._min_time_in_us), 4)) + '|'
for item in self._evaluator)
return [string]
def get_results_dict(self):
results_dict = {}
for i in range(len(self._test_dsize_list)):
dsize = self._test_dsize_list[i]
repeat = scaled_repeat_times(self._arch, dsize,
MemoryBound.basic_repeat_times)
elapsed_time = self._min_time_in_us[i]
item_name = size2str(dsize).replace('.0', '')
item_dict = {
'dsize_byte': dsize,
'repeat': repeat,
'elapsed_time_ms': elapsed_time
}
results_dict[item_name] = item_dict
return results_dict
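# --- Hedged usage sketch (not part of the original file) ---------------------
# One plausible way to drive this suite end to end; the results directory
# layout below is an illustrative assumption, everything else uses names
# defined above. Assumes a Taichi build exposing the archs in supported_archs.
if __name__ == '__main__':
    for arch in MemoryBound.supported_archs:
        suite = MemoryBound(arch)
        suite.run()
        arch_dir = os.path.join('./results', arch_name(arch))
        os.makedirs(arch_dir, exist_ok=True)
        suite.save_as_json(arch_dir)
        suite.save_as_markdown(arch_dir)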
|
from flask_security.utils import hash_password
from flask_smorest import Blueprint, abort
from ..models.user import User
from ..schemas.paging import PageInSchema, paginate
from ..schemas.user import UserPageOutSchema, UserSchema
from .methodviews import ProtectedMethodView
blueprint = Blueprint('users', 'user')
@blueprint.route('', endpoint='list')
class UserListAPI(ProtectedMethodView):
@blueprint.arguments(PageInSchema(), location='headers')
@blueprint.response(UserPageOutSchema)
def get(self, pagination):
"""List users"""
return paginate(User.select(), pagination)
@blueprint.arguments(UserSchema)
@blueprint.response(UserSchema)
def post(self, args):
"""Create user"""
user = User(**args)
user.password = hash_password(user.password)
user.save()
return user
@blueprint.route('/<user_id>', endpoint='detail')
class UserAPI(ProtectedMethodView):
@blueprint.response(UserSchema)
def get(self, user_id):
"""Get user details"""
try:
user = User.get(id=user_id)
except User.DoesNotExist:
abort(404, message='User not found')
return user
@blueprint.arguments(UserSchema(partial=True))
@blueprint.response(UserSchema)
def patch(self, args, user_id):
try:
user = User.get(id=user_id)
except User.DoesNotExist:
abort(404, message='User not found')
for field in args:
setattr(user, field, args[field])
if 'password' in args:
user.password = hash_password(user.password)
user.save()
return user
@blueprint.response(UserSchema)
def delete(self, user_id):
try:
user = User.get(id=user_id)
except User.DoesNotExist:
abort(404, message='User not found')
user.delete_instance()
return user
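# --- Hedged wiring sketch (not part of the original file) --------------------
# How this blueprint is typically mounted on a flask_smorest Api; the app
# factory, config values, and URL prefix below are illustrative assumptions.
# from flask import Flask
# from flask_smorest import Api
#
# app = Flask(__name__)
# app.config['API_TITLE'] = 'users'
# app.config['API_VERSION'] = 'v1'
# app.config['OPENAPI_VERSION'] = '3.0.2'
# api = Api(app)
# api.register_blueprint(blueprint, url_prefix='/api/users')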
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 22 18:44:02 2017
@author: Tirthajyoti Sarkar
Simple selection sort with counter for total number of operations (finding minimum and swapping)
Accepts user input on minimum and maximum bound of the array and the size of the array.
"""
import random
def find_min(array):
    n = len(array)
    r = array[0]
    count = 0
    for i in range(1, n):
        count += 1
        if r > array[i]:
            r = array[i]
    return (r, count)
def selection_sort(array):
    n = len(array)
    num_op = 0
    # Iterate over the length of the array, pushing smaller values to the left
    for i in range(n):
        # Scan the array from the i-th element (where the iterator currently is) to the end for the minimum
        m, c_min = find_min(array[i:n])
        # IMPORTANT: Get the index of the minimum element w.r.t. the main array
        m_index = array[i:n].index(m) + i
        # If the first element of the unsorted portion (i.e. the i-th element) > minimum, then SWAP
        if array[i] > m:
            # Print statement for examining the minimum and its index (troubleshooting)
            # print("Minimum found {} at position {}. Swapping positions {} and {}".format(m, m_index, i, m_index))
            temp = array[i]
            array[i] = m
            array[m_index] = temp
            num_op += (c_min + 1)
            print(array)
        else:
            # No swap needed, but the comparisons made to find the minimum still count
            num_op += c_min
    return (array, num_op)
# User inputs for generating the random arrays
mini = int(input("Enter the minimum bound:"))
maxi = int(input("Enter the maximum bound:"))
num = int(input("Enter the size:"))
# Create a random array based on user-specified minimum/maximum bounds and number of elements
a = []
for i in range(num):
    a.append(random.randint(mini, maxi))
print("\nInitial array:", a)
# Get the sorted array back along with the count of operations it took to sort
sorted_array, n_op = selection_sort(a)
print("Sorted array: {}\nTook {} operations".format(sorted_array, n_op))
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('hyperlink23.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks."""
workbook = Workbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet = workbook.add_worksheet()
worksheet.write_url('A1', 'https://en.wikipedia.org/wiki/Microsoft_Excel#Data_storage_and_communication', None, 'Display text')
workbook.close()
self.assertExcelEqual()
|
import boto3
import pickle
import logging
from datetime import date
from pyspark.sql.functions import udf
from pyspark.sql.types import ArrayType, StringType
from generate_common_words import dump_pickle
import config as conf
'''
Assign Top 500 Stackoverflow tags to job postings
'''
# ===== Logger Configs =====
TS = date.today().strftime('%y%m%d')
logger = logging.getLogger('jd_logger')
logger.setLevel(logging.INFO)
fh = logging.FileHandler(conf.LOG_DIR + TS + '_batch_process.log')
fh.setLevel(logging.INFO)
logger.addHandler(fh)
def read_pickle(bucket, key):
s3 = boto3.resource('s3')
pickled = pickle.loads(s3.Bucket(bucket)\
.Object(key)\
.get()['Body']\
.read())
return pickled
def filter_common_words(tokens, word_list):
"""
Filter out words that appear in many of the documents
Args:
tokens: a list of word lemmas
word_list: a list of common words
Returns:
a set of word lemmas with the common words removed
"""
return list(set([word for word in tokens if word not in word_list]))
def select_tag_words(tokens, tag_list):
"""
Match Stackoverflow tags to word lemmas
Args:
tokens: a list of word lemmas
tag_list: a list of tags
Returns:
a list of tags for each job posting
"""
return [tag for tag in tag_list if tag in tokens]
def assign_tags(jd):
"""
Assign Stackoverflow tags and construct a set of keywords
Args:
jd: cleaned job posting dataframe
Returns:
a dataframe with columns containing keywords and tags
"""
logger.info('[STARTING]: Assigning Tags')
common_words = read_pickle(conf.PARQUET_BUCKET, conf.COMMON_WORDS_PATH)
stack_tags = read_pickle(conf.PARQUET_BUCKET, conf.TAG_PATH)
logger.info('[PROCESSING]: Removing Common Words')
cw_remover = udf(lambda body: filter_common_words(body, common_words), ArrayType(StringType()))
jd_keywords = jd.withColumn('keywords', cw_remover('stemmed'))
logger.info('[PROCESSING]: Getting Tags')
tagger = udf(lambda body: select_tag_words(body, stack_tags), ArrayType(StringType()))
jd_tags = jd_keywords.withColumn('tags', tagger('keywords'))
logger.info('[Finished]: Assigning Tags')
return jd_tags
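# --- Hedged driver sketch (not part of the original file) --------------------
# One way assign_tags might be invoked in a batch job; the SparkSession setup
# and the parquet paths are illustrative assumptions, not part of this module.
# from pyspark.sql import SparkSession
#
# spark = SparkSession.builder.appName('assign-tags').getOrCreate()
# jd = spark.read.parquet('s3a://' + conf.PARQUET_BUCKET + '/cleaned_jd/')
# jd_tags = assign_tags(jd)
# jd_tags.write.mode('overwrite').parquet('s3a://' + conf.PARQUET_BUCKET + '/tagged_jd/')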
|
"""
WSGI config for simplerest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "simplerest.settings")
application = get_wsgi_application()
|
import json
import os
from mako.template import Template
from models.pomodoro_model import PomodoroModel
from datetime import datetime
class ExportPomsResource:
def on_get(self, req, resp):
"""Handles GET requests"""
resp.content_type = 'text/html'
dir_path = os.path.dirname(os.path.realpath(__file__))
user_login_template = Template(
filename=dir_path + '/export_poms_view.mako')
resp.body = user_login_template.render()
def on_post(self, req, resp):
"""Handles POST requests"""
start_date = req.get_param('start_date')
end_date = req.get_param('end_date')
# Query poms within start and end dates
poms = req.context['session'].query(
PomodoroModel).filter(PomodoroModel.created <= end_date). \
filter(PomodoroModel.created >= start_date).filter_by(
user_id=req.context['user'].id).order_by(
PomodoroModel.created, PomodoroModel.start_time).all()
data = {'poms': []}
for row in poms:
pom = {
'created': datetime.strftime(row.created, '%Y-%m-%d'),
'title': row.task,
'start_time': row.start_time.strftime('%I:%M%p'),
'end_time': row.end_time.strftime('%I:%M%p'),
'distractions': row.distractions,
'pom_success': row.pom_success,
'review': row.review,
'flags': []
}
for flag in row.flags:
pom['flags'].append(flag.flag_type)
data['poms'].append(pom)
filename = str(start_date) + '-' + str(
end_date) + '_Arin_Pom_Sheets.json'
resp.body = json.dumps(data, indent=2)
resp.downloadable_as = filename
resp.content_type = 'application/octet-stream'
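# --- Hedged wiring sketch (not part of the original file) --------------------
# How this resource might be mounted in a Falcon 3.x app; the route and the
# middleware that populate req.context['session'] / req.context['user'] are
# assumptions about the host application.
# import falcon
#
# app = falcon.App()
# app.add_route('/export_poms', ExportPomsResource())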
|
import csv
import json
def read_data_file(filename):
    all_records = []
    with open(filename, mode='r') as csv_file:
        csv_reader = csv.reader(csv_file)
        for row in csv_reader:
            all_records.append(row)
    return all_records
def parse_play_fields(play_fields):
plays = []
for p in play_fields:
temp = {}
temp["event_count"] = int(p[0])
temp["inning_number"] = int(p[1])
temp["home_field_indicator"] = int(p[2])
temp["player_code"] = p[3]
temp["count_at_action"] = p[4]
temp["all_pitches"] = p[5]
temp["play_events"] = p[6]
plays.append(temp)
return plays
def parse_sub_fields(sub_fields):
subs = []
for s in sub_fields:
temp = {}
temp["event_count"] = int(s[0])
temp["player_code"] = s[1]
temp["name"] = s[2]
temp["home_field_indicator"] = int(s[3])
temp["batting_order"] = int(s[4])
temp["field_position"] = int(s[5])
subs.append(temp)
return subs
def parse_info_fields(info_fields):
infos = []
for i in info_fields:
temp = {}
temp[i[0]] = i[1]
infos.append(temp)
return infos
def parse_start_fields(start_fields):
starts = []
for s in start_fields:
temp = {}
temp["player_code"] = s[0]
temp["name"] = s[1]
temp["home_field_indicator"] = int(s[2])
temp["batting_order"] = int(s[3])
temp["field_position"] = int(s[4])
starts.append(temp)
return starts
def parse_data_fields(data_fields):
datas = []
for d in data_fields:
temp = {}
temp["type"] = d[0]
temp["player_code"] = d[1]
temp["earned_runs"] = int(d[2])
datas.append(temp)
return datas
def get_fields(records):
id = ""
version = ""
play = []
info = []
start = []
data = []
sub = []
event_count = 0
for r in records:
if r[0] == "play":
r[0] = event_count
event_count += 1
play.append(r)
elif r[0] == "info":
info.append(r[1:])
elif r[0] == "start":
start.append(r[1:])
elif r[0] == "data":
data.append(r[1:])
elif r[0] == "sub":
r[0] = event_count
event_count += 1
sub.append(r)
elif r[0] == "com":
continue # This one we should ignore
elif r[0].endswith("adj"):
continue # This one we should ignore
elif r[0] == "id":
id = r[1]
elif r[0] == "version":
version = r[1]
        else:
            print("ERROR: unrecognized record type")
            print(r)
return id, version, play, info, start, data, sub
def get_game(game_records):
id, version, plays, infos, starts, datas, subs = get_fields(game_records)
play_list = parse_play_fields(plays)
info_list = parse_info_fields(infos)
start_list = parse_start_fields(starts)
data_list = parse_data_fields(datas)
sub_list = parse_sub_fields(subs)
game = {}
game["id"] = id
game["version"] = version
game["plays"] = play_list
game["info"] = info_list
game["start"] = start_list
game["data"] = data_list
game["subs"] = sub_list
return game
def get_all_games(all_game_records):
all_games = []
for g in all_game_records:
all_games.append(get_game(g))
return json.dumps(all_games)
# path = "./../storage/ANA201808100.in"
# game_records = read_data_file(path)
# game = get_game(game_records)
#
# out = json.loads(game)
# print(out["id"])
|
from rubicon.java.android_events import Handler, PythonRunnable
from rubicon.java.jni import java
from travertino.size import at_least
from ..libs.android import R__color
from ..libs.android.graphics import BitmapFactory, Rect
from ..libs.android.view import Gravity, OnClickListener, View__MeasureSpec
from ..libs.android.widget import (
ImageView,
ImageView__ScaleType,
LinearLayout,
LinearLayout__LayoutParams,
RelativeLayout,
RelativeLayout__LayoutParams,
ScrollView,
TextView
)
from ..libs.androidx.swiperefreshlayout import (
SwipeRefreshLayout,
SwipeRefreshLayout__OnRefreshListener
)
from .base import Widget
class DetailedListOnClickListener(OnClickListener):
def __init__(self, impl, row_number):
super().__init__()
self._impl = impl
self._row_number = row_number
def onClick(self, _view):
row = self._impl.interface.data[self._row_number]
self._impl._selection = row
if self._impl.interface.on_select:
            self._impl.interface.on_select(self._impl.interface, row=row)
class OnRefreshListener(SwipeRefreshLayout__OnRefreshListener):
def __init__(self, interface):
super().__init__()
self._interface = interface
def onRefresh(self):
if self._interface.on_refresh:
self._interface.on_refresh(self._interface)
class DetailedList(Widget):
ROW_HEIGHT = 250
_swipe_refresh_layout = None
_scroll_view = None
_dismissable_container = None
_selection = None
def create(self):
# DetailedList is not a specific widget on Android, so we build it out
# of a few pieces.
if self.native is None:
self.native = LinearLayout(self._native_activity)
self.native.setOrientation(LinearLayout.VERTICAL)
else:
# If create() is called a second time, clear the widget and regenerate it.
self.native.removeAllViews()
scroll_view = ScrollView(self._native_activity)
self._scroll_view = ScrollView(
__jni__=java.NewGlobalRef(scroll_view))
scroll_view_layout_params = LinearLayout__LayoutParams(
LinearLayout__LayoutParams.MATCH_PARENT,
LinearLayout__LayoutParams.MATCH_PARENT
)
scroll_view_layout_params.gravity = Gravity.TOP
swipe_refresh_wrapper = SwipeRefreshLayout(self._native_activity)
swipe_refresh_wrapper.setOnRefreshListener(OnRefreshListener(self.interface))
self._swipe_refresh_layout = SwipeRefreshLayout(
__jni__=java.NewGlobalRef(swipe_refresh_wrapper))
swipe_refresh_wrapper.addView(scroll_view)
self.native.addView(swipe_refresh_wrapper, scroll_view_layout_params)
dismissable_container = LinearLayout(self._native_activity)
self._dismissable_container = LinearLayout(
__jni__=java.NewGlobalRef(dismissable_container)
)
dismissable_container.setOrientation(LinearLayout.VERTICAL)
dismissable_container_params = LinearLayout__LayoutParams(
LinearLayout__LayoutParams.MATCH_PARENT,
LinearLayout__LayoutParams.MATCH_PARENT
)
scroll_view.addView(
dismissable_container, dismissable_container_params
)
        for i in range(len(self.interface.data or [])):
self._make_row(dismissable_container, i)
def _make_row(self, container, i):
# Create the foreground.
row_foreground = RelativeLayout(self._native_activity)
container.addView(row_foreground)
# Add user-provided icon to layout.
icon_image_view = ImageView(self._native_activity)
icon = self.interface.data[i].icon
if icon is not None:
icon.bind(self.interface.factory)
bitmap = BitmapFactory.decodeFile(str(icon._impl.path))
icon_image_view.setImageBitmap(bitmap)
icon_layout_params = RelativeLayout__LayoutParams(
RelativeLayout__LayoutParams.WRAP_CONTENT,
RelativeLayout__LayoutParams.WRAP_CONTENT)
icon_layout_params.width = 150
icon_layout_params.setMargins(25, 0, 25, 0)
icon_layout_params.height = self.ROW_HEIGHT
icon_image_view.setScaleType(ImageView__ScaleType.FIT_CENTER)
row_foreground.addView(icon_image_view, icon_layout_params)
# Create layout to show top_text and bottom_text.
text_container = LinearLayout(self._native_activity)
text_container_params = RelativeLayout__LayoutParams(
RelativeLayout__LayoutParams.WRAP_CONTENT,
RelativeLayout__LayoutParams.WRAP_CONTENT)
text_container_params.height = self.ROW_HEIGHT
text_container_params.setMargins(25 + 25 + 150, 0, 0, 0)
row_foreground.addView(text_container, text_container_params)
text_container.setOrientation(LinearLayout.VERTICAL)
text_container.setWeightSum(2.0)
# Create top & bottom text; add them to layout.
top_text = TextView(self._native_activity)
top_text.setText(str(getattr(self.interface.data[i], 'title', '')))
top_text.setTextSize(20.0)
top_text.setTextColor(self._native_activity.getResources().getColor(R__color.black))
bottom_text = TextView(self._native_activity)
bottom_text.setTextColor(self._native_activity.getResources().getColor(R__color.black))
bottom_text.setText(str(getattr(self.interface.data[i], 'subtitle', '')))
bottom_text.setTextSize(16.0)
top_text_params = LinearLayout__LayoutParams(
RelativeLayout__LayoutParams.WRAP_CONTENT,
RelativeLayout__LayoutParams.MATCH_PARENT)
top_text_params.weight = 1.0
top_text.setGravity(Gravity.BOTTOM)
text_container.addView(top_text, top_text_params)
bottom_text_params = LinearLayout__LayoutParams(
RelativeLayout__LayoutParams.WRAP_CONTENT,
RelativeLayout__LayoutParams.MATCH_PARENT)
bottom_text_params.weight = 1.0
bottom_text.setGravity(Gravity.TOP)
bottom_text_params.gravity = Gravity.TOP
text_container.addView(bottom_text, bottom_text_params)
# Apply an onclick listener so that clicking anywhere on the row triggers Toga's on_select(row).
row_foreground.setOnClickListener(DetailedListOnClickListener(self, i))
def change_source(self, source):
# If the source changes, re-build the widget.
self.create()
def set_on_refresh(self, handler):
# No special handling needed.
pass
def after_on_refresh(self):
if self._swipe_refresh_layout:
self._swipe_refresh_layout.setRefreshing(False)
def insert(self, index, item):
# If the data changes, re-build the widget. Brutally effective.
self.create()
def change(self, item):
# If the data changes, re-build the widget. Brutally effective.
self.create()
def remove(self, index, item):
# If the data changes, re-build the widget. Brutally effective.
self.create()
def clear(self):
# If the data changes, re-build the widget. Brutally effective.
self.create()
def get_selection(self):
return self._selection
def set_on_select(self, handler):
# No special handling required.
pass
def set_on_delete(self, handler):
# This widget currently does not implement event handlers for data change.
self.interface.factory.not_implemented("DetailedList.set_on_delete()")
def scroll_to_row(self, row):
def scroll():
row_obj = self._dismissable_container.getChildAt(row)
hit_rect = Rect()
row_obj.getHitRect(hit_rect)
self._scroll_view.requestChildRectangleOnScreen(
self._dismissable_container,
hit_rect,
False,
)
Handler().post(PythonRunnable(scroll))
def rehint(self):
# Android can crash when rendering some widgets until they have their layout params set. Guard for that case.
if self.native.getLayoutParams() is None:
return
self.native.measure(
View__MeasureSpec.UNSPECIFIED,
View__MeasureSpec.UNSPECIFIED,
)
self.interface.intrinsic.width = at_least(self.native.getMeasuredWidth())
self.interface.intrinsic.height = self.native.getMeasuredHeight()
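# --- Hedged usage sketch (not part of the original file) ---------------------
# This module is the Android backend; applications construct the widget through
# the public toga API, roughly as below. The row dict keys match the
# title/subtitle/icon attributes this backend reads; the handler signature is
# an assumption about the toga version in use.
# import toga
#
# dl = toga.DetailedList(
#     data=[dict(icon=None, title='First', subtitle='details')],
#     on_select=lambda widget, row=None: print(row),
# )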
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
#
# Copyright (C) 2021 Graz University of Technology.
#
# Invenio-Records-Marc21 is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Permissions for Invenio Marc21 Records."""
from invenio_records_permissions.generators import AnyUser, SystemProcess
from invenio_records_permissions.policies.records import RecordPermissionPolicy
class Marc21RecordPermissionPolicy(RecordPermissionPolicy):
"""Access control configuration for records.
Note that even if the array is empty, the invenio_access Permission class
always adds the ``superuser-access``, so admins will always be allowed.
- Create action given to everyone for now.
- Read access given to everyone if public record and given to owners
always. (inherited)
- Update access given to record owners. (inherited)
- Delete access given to admins only. (inherited)
"""
# TODO: Change all below when permissions settled
can_create = [AnyUser()]
can_publish = [AnyUser()]
can_read = [AnyUser()]
can_update = [AnyUser()]
can_new_version = [AnyUser()]
can_edit = [AnyUser()]
can_lift_embargo = [AnyUser()]
# Draft permissions
can_read_draft = [AnyUser()]
can_delete_draft = [AnyUser()]
can_update_draft = [AnyUser()]
can_search_drafts = [AnyUser()]
can_draft_read_files = [AnyUser()]
can_draft_create_files = [AnyUser(), SystemProcess()]
can_draft_update_files = [AnyUser()]
can_draft_delete_files = [AnyUser()]
# Files permissions
can_read_files = [AnyUser()]
can_create_files = [AnyUser(), SystemProcess()]
can_update_files = [AnyUser()]
can_delete_files = [AnyUser()]
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "animalRescue.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
import ast
import inspect
import json
import logging
import os
import sys
import tempfile
from functools import wraps
from collections import deque
from execution_trace.constants import RECORD_FN_NAME, RETVAL_NAME, MANGLED_FN_NAME
from execution_trace.utils import strip_indent
# Init logging.
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Will be initialized in `record`. See `init_recorded_state`.
_record_store_hidden_123 = None
# To guard against decorating more than one function.
num_fns_recorded = 0
# To know when to print out the source code of the function.
first_dump_call = True
# To know how many executions were recorded.
num_recorded_executions = 0
def _record_state_fn_hidden_123(lineno, f_locals):
"""Stores local line data."""
# Make sure we have just primitive types.
f_locals = {k: repr(v) for k, v in f_locals.iteritems()}
data = {
'lineno': lineno,
'state': f_locals,
}
# This is a stack so always append in the top frame.
_record_store_hidden_123[-1]['data'].append(data)
# http://stackoverflow.com/a/12240419
# TL;DR need this because the decorator would
# recursively apply on the new generated function.
_blocked = False
def record(num_executions=1):
def _record(f):
"""Transforms `f` such that after every line record_state is called.
*** HERE BE DRAGONS ***
"""
global num_fns_recorded
# Make sure this is not a recursive decorator application.
global _blocked
if _blocked:
return f
# We only support recording one fn's executions at the moment.
if num_fns_recorded:
raise ValueError('Cannot `record` more than one function at a time.')
num_fns_recorded += 1
source = inspect.getsource(f)
parsed = ast.parse(strip_indent(source))
original_body = list(parsed.body[0].body)
# Update body
parsed.body[0].body = _fill_body_with_record(original_body)
# Compile and inject modified function back into its env.
new_f_compiled = compile(parsed, '<string>', 'exec')
env = sys.modules[f.__module__].__dict__
# We also need to inject our stuff in there.
env[RECORD_FN_NAME] = globals()[RECORD_FN_NAME]
_blocked = True
exec new_f_compiled in env
_blocked = False
# Keep a reference to the (original) mangled function, because our decorator
# will end up replacing it with `wrapped`. Then, whenever `wrapped` ends up
# calling the original function, it would end up calling itself, leading
# to an infinite recursion. Thus, we keep the fn we want to call under
# a separate key which `wrapped` can call without a problem.
# We are doing this instead of simply changing the recorded fn's name because
# we have to support recursive calls (which would lead to NameError if we changed
# the fn's name).
env[MANGLED_FN_NAME] = env[f.__name__]
init_recorded_state()
file, path = _get_dump_file()
logger.info("Will record execution of %s in %s . "
"Use `view_trace %s` to view it.",
f.__name__, path, path)
# Wrap in our own function such that we can dump the recorded state at the end.
@wraps(f)
def wrapped(*args, **kwargs):
# Write source to file the first time we are called.
global first_dump_call
if first_dump_call:
dump_fn_source(file, source)
first_dump_call = False
global num_recorded_executions
# Are we still recording?
if num_recorded_executions < num_executions:
# New stack frame -> new state.
push_recorded_state()
# Call the recorded function. This might throw.
try:
ret = env[MANGLED_FN_NAME](*args, **kwargs)
except:
# Re-raise, let the user handle this.
raise
finally:
# But still log what we captured.
dump_recorded_state(file)
# Done recording on this frame.
pop_recorded_state()
num_recorded_executions += 1
# If not, just call the original function.
else:
ret = f(*args, **kwargs)
return ret
return wrapped
return _record
def _make_record_state_call_expr(lineno):
# Create locals() call.
name = ast.Name(ctx=ast.Load(), id='locals', lineno=0, col_offset=0)
locals_call = ast.Call(func=name, lineno=0, col_offset=0, args=[], keywords=[])
# Create lineno constant arg.
num = ast.Num(n=lineno, lineno=0, col_offset=0)
# Create record_state call.
name = ast.Name(ctx=ast.Load(), id=RECORD_FN_NAME, lineno=0, col_offset=0)
call = ast.Call(func=name, lineno=0, col_offset=0,
args=[num, locals_call],
keywords=[])
expr = ast.Expr(value=call, lineno=0, col_offset=0)
return expr
def _make_return_trace_call_exprs(item):
# Store retval in an aux var and return that instead.
store_name = ast.Name(ctx=ast.Store(), id=RETVAL_NAME, col_offset=0, lineno=0)
load_name = ast.Name(ctx=ast.Load(), id=RETVAL_NAME, col_offset=0, lineno=0)
assign = ast.Assign(col_offset=0, targets=[store_name], value=item.value, lineno=0)
ret = ast.Return(lineno=0, value=load_name, col_offset=0)
return [
assign,
_make_record_state_call_expr(item.lineno),
ret
]
def _fill_body_with_record(original_body, prepend=False, lineno=None):
"""Adds a record_state call after every item in the block.
Recursive, works for nested bodies (e.g. if statements).
`prepend` inserts a record_state call right at the start. We need this for
recording the state on lines introducing nested blocks (`if`, `while` etc.)
"""
new_body = []
if prepend:
assert lineno is not None, "Should've called prepend with a lineno."
new_body.append(_make_record_state_call_expr(lineno))
for item in original_body:
# Handle return statements separately such that we capture retval as well.
if isinstance(item, ast.Return):
new_body.extend(_make_return_trace_call_exprs(item))
continue
has_nested = False
# Look out for nested bodies.
if hasattr(item, 'body'):
has_nested = True
new_nested_body = _fill_body_with_record(item.body, prepend=True, lineno=item.lineno)
item.body = new_nested_body
if hasattr(item, 'orelse'):
has_nested = True
# Don't want to prepend call for try/except, but we want for the others.
if isinstance(item, ast.TryExcept):
prepend = False
else:
prepend = True
# `else` does not have a lineno, using `if`'s lineno.
new_nested_body = _fill_body_with_record(item.orelse, prepend=prepend, lineno=item.lineno)
item.orelse = new_nested_body
# Except blocks.
if hasattr(item, 'handlers'):
has_nested = True
for handler in item.handlers:
new_nested_body = _fill_body_with_record(handler.body, prepend=False, lineno=handler.lineno)
handler.body = new_nested_body
new_body.append(item)
# Don't append a call after the end of the nested body, it's redundant.
if not has_nested:
new_body.append(_make_record_state_call_expr(item.lineno))
return new_body
def _get_dump_file():
"""Returns file object and its path."""
fd, path = tempfile.mkstemp(prefix='record_', suffix='.json')
# Will never be `close`d because we don't know when user stops the program.
# We'll live with this.
file = os.fdopen(fd, 'w')
return file, path
def init_recorded_state():
global _record_store_hidden_123
# Using a stack for the frames' states to support recursive fns.
_record_store_hidden_123 = deque()
def push_recorded_state():
global _record_store_hidden_123
_record_store_hidden_123.append({'data': []})
def pop_recorded_state():
global _record_store_hidden_123
_record_store_hidden_123.pop()
def dump_recorded_state(file):
# This is a stack so always dump top call.
json.dump(_record_store_hidden_123[-1], file)
file.write('\n')
def dump_fn_source(file, source):
data = {'source': source}
json.dump(data, file)
file.write('\n')
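# --- Hedged usage sketch (not part of the original file) ---------------------
# How `record` is meant to be applied, assuming this module is importable as
# `execution_trace.record`; `fib` is a hypothetical function. Each traced
# execution is appended to the temp file logged at decoration time.
# from execution_trace.record import record
#
# @record(num_executions=2)
# def fib(n):
#     if n < 2:
#         return n
#     return fib(n - 1) + fib(n - 2)
#
# fib(5)  # first recorded execution (recursion handled via the frame stack)
# fib(6)  # second recorded execution; later calls run untraced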
|
import http.server
import threading
import testPackage
class MyHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
self._set_headers()
self.wfile.write(b"<html><head><title>My Title</title></head>")
self.wfile.write(bytes("<body><p>You accessed path: %s</p>" % self.path,"utf-8"))
self.wfile.write(bytes("Light Value: %d" % testPackage.LIGHT_VALUE,"utf-8"))
self.wfile.write(b"</body></html>")
print("Sent light value")
def do_POST(self):
testPackage.increase_light()
self._set_headers()
self.wfile.write(b"Success")
print("Increasing Light value")
class serverThread (threading.Thread) :
def __init__(self, threadID):
threading.Thread.__init__(self)
self.threadID = threadID
def run(self):
print ("Starting serverThread: %d" % self.threadID)
httpd = http.server.HTTPServer(server_address, request_handler)
httpd.serve_forever()
print ("Exiting thread")
port = 30000
server_address = ('',port)
request_handler = MyHTTPRequestHandler
thread_server = serverThread(1)
thread_server.start()
testPackage.polling_forever()
print('After polling_forever()')
|
import sys
from datetime import datetime
from app import filters
from app.misc.execute import run
def main(args: list[str]):
from app.handlers import dp
    # Store the current time so the bot's uptime can be computed later
dp.bot['start_time'] = datetime.now()
dp['args'] = args
run(dp)
if __name__ == '__main__':
filters.setup()
main(sys.argv[1:])
|
import cv2
import threading
class RecordingThread (threading.Thread):
def __init__(self, name, camera):
threading.Thread.__init__(self)
self.name = name
self.isRunning = True
self.cap = camera
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
self.out = cv2.VideoWriter('./static/video.avi',fourcc, 20.0, (640,480))
def run(self):
while self.isRunning:
ret, frame = self.cap.read()
if ret:
self.out.write(frame)
self.out.release()
def stop(self):
self.isRunning = False
def __del__(self):
self.out.release()
class VideoCamera(object):
def __init__(self):
# Open a camera
self.cap = cv2.VideoCapture(2)
# Initialize video recording environment
self.is_record = False
self.out = None
# Thread for recording
self.recordingThread = None
def __del__(self):
self.cap.release()
def get_frame(self):
ret, frame = self.cap.read()
if ret:
ret, jpeg = cv2.imencode('.jpg', frame)
# Record video
# if self.is_record:
# if self.out == None:
# fourcc = cv2.VideoWriter_fourcc(*'MJPG')
# self.out = cv2.VideoWriter('./static/video.avi',fourcc, 20.0, (640,480))
# ret, frame = self.cap.read()
# if ret:
# self.out.write(frame)
# else:
# if self.out != None:
# self.out.release()
# self.out = None
return jpeg.tobytes()
else:
return None
def start_record(self):
self.is_record = True
self.recordingThread = RecordingThread("Video Recording Thread", self.cap)
self.recordingThread.start()
def stop_record(self):
self.is_record = False
        if self.recordingThread is not None:
self.recordingThread.stop()
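# --- Hedged streaming sketch (not part of the original file) -----------------
# A typical MJPEG generator around get_frame(); the multipart framing below is
# the conventional pattern for Flask-style video streaming, assumed here rather
# than taken from this module.
# def gen(camera):
#     while True:
#         frame = camera.get_frame()
#         if frame is None:
#             break
#         yield (b'--frame\r\n'
#                b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')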
|
from __future__ import absolute_import
import datetime
import os
from socket import error as SocketError, timeout as SocketTimeout
import socket
import sys
import warnings
from .exceptions import (
NewConnectionError,
ConnectTimeoutError,
SubjectAltNameWarning,
SystemTimeWarning,
)
from .packages import six
from .packages.ssl_match_hostname import match_hostname
from .util import connection
from .util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
assert_fingerprint,
)
try: # Python 3
from http.client import HTTPConnection as _HTTPConnection
from http.client import HTTPException # noqa: unused in this module
except ImportError:
from httplib import HTTPConnection as _HTTPConnection
from httplib import HTTPException # noqa: unused in this module
try: # Compiled with SSL?
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException):
pass
try: # Python 3:
# Not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
except NameError: # Python 2:
class ConnectionError(Exception):
pass
port_by_scheme = {
'http': 80,
'https': 443,
}
RECENT_DATE = datetime.date(2014, 1, 1)
class DummyConnection(object):
"""Used to detect a failed ConnectionCls import."""
pass
class HTTPConnection(_HTTPConnection, object):
"""
Based on httplib.HTTPConnection but provides an extra constructor
backwards-compatibility layer between older and newer Pythons.
Additional keyword parameters are used to configure attributes of the connection.
Accepted parameters include:
- ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
- ``source_address``: Set the source address for the current connection.
.. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
- ``socket_options``: Set specific options on the underlying socket. If not specified, then
defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
For example, if you wish to enable TCP Keep Alive in addition to the defaults,
you might pass::
HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]
Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
"""
default_port = port_by_scheme['http']
#: Disable Nagle's algorithm by default.
#: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
#: Whether this connection verifies the host's certificate.
is_verified = False
def __init__(self, *args, **kw):
if six.PY3: # Python 3
kw.pop('strict', None)
# Pre-set source_address in case we have an older Python like 2.6.
self.source_address = kw.get('source_address')
if sys.version_info < (2, 7): # Python 2.6
# _HTTPConnection on Python 2.6 will balk at this keyword arg, but
# not newer versions. We can still use it when creating a
# connection though, so we pop it *after* we have saved it as
# self.source_address.
kw.pop('source_address', None)
#: The socket options provided by the user. If no options are
#: provided, we use the default options.
self.socket_options = kw.pop('socket_options', self.default_socket_options)
# Superclass also sets self.source_address in Python 2.7+.
_HTTPConnection.__init__(self, *args, **kw)
def _new_conn(self):
""" Establish a socket connection and set nodelay settings on it.
:return: New socket connection.
"""
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = connection.create_connection(
(self.host, self.port), self.timeout, **extra_kw)
except SocketTimeout as e:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
except SocketError as e:
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e)
return conn
def _prepare_conn(self, conn):
self.sock = conn
# the _tunnel_host attribute was added in python 2.6.3 (via
# http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
# not have them.
if getattr(self, '_tunnel_host', None):
# TODO: Fix tunnel so it doesn't depend on self.sock state.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
class HTTPSConnection(HTTPConnection):
default_port = port_by_scheme['https']
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw):
HTTPConnection.__init__(self, host, port, strict=strict,
timeout=timeout, **kw)
self.key_file = key_file
self.cert_file = cert_file
# Required property for Google AppEngine 1.9.0 which otherwise causes
# HTTPS requests to go out as HTTP. (See Issue #356)
self._protocol = 'https'
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
ca_cert_dir = None
ssl_version = None
assert_fingerprint = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None,
assert_hostname=None, assert_fingerprint=None,
ca_cert_dir=None):
if (ca_certs or ca_cert_dir) and cert_reqs is None:
cert_reqs = 'CERT_REQUIRED'
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
def connect(self):
# Add certificate verification
conn = self._new_conn()
resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = resolve_ssl_version(self.ssl_version)
hostname = self.host
if getattr(self, '_tunnel_host', None):
# _tunnel_host was added in Python 2.6.3
# (See: http://hg.python.org/cpython/rev/0f57b30a152f)
self.sock = conn
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
# Override the host with the one we're requesting data from.
hostname = self._tunnel_host
is_time_off = datetime.date.today() < RECENT_DATE
if is_time_off:
warnings.warn((
'System time is way off (before {0}). This will probably '
'lead to SSL verification errors').format(RECENT_DATE),
SystemTimeWarning
)
# Wrap socket using verification with the root certs in
# trusted_root_certs
self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
server_hostname=hostname,
ssl_version=resolved_ssl_version)
if self.assert_fingerprint:
assert_fingerprint(self.sock.getpeercert(binary_form=True),
self.assert_fingerprint)
elif resolved_cert_reqs != ssl.CERT_NONE \
and self.assert_hostname is not False:
cert = self.sock.getpeercert()
if not cert.get('subjectAltName', ()):
warnings.warn((
'Certificate for {0} has no `subjectAltName`, falling back to check for a '
'`commonName` for now. This feature is being removed by major browsers and '
'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
'for details.)'.format(hostname)),
SubjectAltNameWarning
)
# In case the hostname is an IPv6 address, strip the square
# brackets from it before using it to validate. This is because
# a certificate with an IPv6 address in it won't have square
# brackets around that address. Sadly, match_hostname won't do this
# for us: it expects the plain host part without any extra work
# that might have been done to make it palatable to httplib.
asserted_hostname = self.assert_hostname or hostname
asserted_hostname = asserted_hostname.strip('[]')
match_hostname(cert, asserted_hostname)
self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED or
self.assert_fingerprint is not None)
if ssl:
# Make a copy for testing.
UnverifiedHTTPSConnection = HTTPSConnection
HTTPSConnection = VerifiedHTTPSConnection
else:
HTTPSConnection = DummyConnection
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import shutil
import subprocess
from pre_wigs_validation.enums import ValidationEnforcement, ValidationResult
from pre_wigs_validation.instance import ValidationInstance
from pre_wigs_validation.dataclasses import ValidationOutput
from pre_wigs_validation.utils import check_validation_config, get_ld_library_orig
class RepoAccess:
"""Validate that an instance has access to Yum Repositories."""
validation = "Repo Access"
enforcement = ValidationEnforcement.REQUIRED
@classmethod
def validate(
cls, *, enabled: bool = True, instance: ValidationInstance
) -> ValidationOutput:
"""
Parameters:
enabled (bool): whether or not to run this validation function
instance (ValidationInstance): the instance object being validated
Returns:
ValidationOutput: output of validation
"""
if not enabled:
return ValidationOutput(
validation=cls.validation,
result=ValidationResult.NOT_RUN,
enforcement=cls.enforcement,
)
if instance.distribution == "sles":
repo_check_command = ["zypper", "refresh"]
verbose_error_message = "Command 'zypper' not in $PATH"
package_manager_found = not shutil.which("zypper") == None
else:
repo_check_command = ["yum", "check-update"]
verbose_error_message = "Command 'yum' not in $PATH"
package_manager_found = not shutil.which("yum") == None
error_message = "Unable to validate due to unsupported environment"
fail_message = "Unable to access repositories"
config = check_validation_config(
default_params=cls.validate.__kwdefaults__, local_params=locals()
)
if not package_manager_found:
return ValidationOutput(
validation=cls.validation,
result=ValidationResult.FAIL,
enforcement=cls.enforcement,
config=config,
message=error_message,
verbose_message=verbose_error_message,
)
proc = subprocess.run(
repo_check_command,
env=get_ld_library_orig(),
stdout=subprocess.DEVNULL,
stderr=subprocess.PIPE,
)
if proc.returncode == 1:
verbose_message = proc.stderr.decode("utf-8")
return ValidationOutput(
validation=cls.validation,
result=ValidationResult.FAIL,
enforcement=cls.enforcement,
config=config,
message=fail_message,
verbose_message=verbose_message,
)
return ValidationOutput(
validation=cls.validation,
result=ValidationResult.PASS,
enforcement=cls.enforcement,
config=config,
)
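# --- Hedged usage sketch (not part of the original file) ---------------------
# Invoking the validation directly; how ValidationInstance is constructed is an
# assumption (see pre_wigs_validation.instance for the real API).
# instance = ValidationInstance()  # hypothetical construction
# output = RepoAccess.validate(enabled=True, instance=instance)
# print(output.result, output.message)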
|
print('a.py is executed')
|
from numpy.random import RandomState
from typing import Any, Optional, List
from numpy import arange
from copy import deepcopy
from pydeeprecsys.rl.neural_networks.dueling import DuelingDDQN
from pydeeprecsys.rl.experience_replay.priority_replay_buffer import (
PrioritizedExperienceReplayBuffer,
)
from pydeeprecsys.rl.experience_replay.buffer_parameters import (
PERBufferParameters,
ExperienceReplayBufferParameters,
)
from pydeeprecsys.rl.agents.agent import ReinforcementLearning
from pydeeprecsys.rl.learning_statistics import LearningStatistics
class RainbowDQNAgent(ReinforcementLearning):
"""Instead of sampling randomly from the buffer we prioritize experiences with PER
Instead of epsilon-greedy we use gaussian noisy layers for exploration
Instead of the Q value we calculate Value and Advantage (Dueling DQN).
This implementation does not include the Categorical DQN part (yet)."""
def __init__(
self,
input_size: int,
output_size: int,
network_update_frequency: int = 5,
network_sync_frequency: int = 200,
priority_importance: float = 0.6,
        priority_weight_growth: float = 0.001,
buffer_size: int = 10000,
buffer_burn_in: int = 1000,
batch_size: int = 32,
noise_sigma: float = 0.017,
discount_factor: float = 0.99,
learning_rate: float = 0.0001,
        hidden_layers: Optional[List[int]] = None,
random_state: RandomState = RandomState(),
statistics: Optional[LearningStatistics] = None,
):
self.network = DuelingDDQN(
n_input=input_size,
n_output=output_size,
learning_rate=learning_rate,
noise_sigma=noise_sigma,
discount_factor=discount_factor,
statistics=statistics,
hidden_layers=hidden_layers,
)
self.target_network = deepcopy(self.network)
self.buffer = PrioritizedExperienceReplayBuffer(
ExperienceReplayBufferParameters(
max_experiences=buffer_size,
minimum_experiences_to_start_predicting=buffer_burn_in,
batch_size=batch_size,
random_state=random_state,
),
PERBufferParameters(
alpha=priority_importance,
                beta_growth=priority_weight_growth,
),
)
self.step_count = 0
self.network_update_frequency = network_update_frequency
self.network_sync_frequency = network_sync_frequency
self.actions = arange(output_size)
self.random_state = random_state
def _check_update_network(self):
# we only start training the network once the buffer is ready
# (the burn in is filled)
if self.buffer.ready_to_predict():
self.step_count += 1
if self.step_count % self.network_update_frequency == 0:
# we train at every K steps
self.network.learn_with(self.buffer, self.target_network)
if self.step_count % self.network_sync_frequency == 0:
# at every N steps replaces the target network with the main network
self.target_network.load_state_dict(self.network.state_dict())
def top_k_actions_for_state(self, state: Any, k: int = 1) -> Any:
state_flat = state.flatten()
if self.buffer.ready_to_predict():
actions = self.target_network.top_k_actions_for_state(state_flat, k=k)
else:
actions = self.random_state.choice(self.actions, size=k)
self._check_update_network()
return actions
def action_for_state(self, state: Any) -> Any:
return self.top_k_actions_for_state(state, k=1)[0]
def store_experience(
self, state: Any, action: Any, reward: float, done: bool, new_state: Any
):
state_flat = state.flatten()
new_state_flat = new_state.flatten()
self.buffer.store_experience(state_flat, action, reward, done, new_state_flat)
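# --- Hedged interaction-loop sketch (not part of the original file) ----------
# Driving the agent against a Gym-style environment; `env` and its reset/step
# API are assumptions, not part of this module.
# agent = RainbowDQNAgent(input_size=env.observation_space.shape[0],
#                         output_size=env.action_space.n)
# state = env.reset()
# done = False
# while not done:
#     action = agent.action_for_state(state)
#     new_state, reward, done, _ = env.step(action)
#     agent.store_experience(state, action, reward, done, new_state)
#     state = new_state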
|
#grad_cam
#[keras-grad-cam/grad-cam.py](https://github.com/jacobgil/keras-grad-cam/blob/master/grad-cam.py)
from keras.applications.vgg16 import (VGG16, preprocess_input, decode_predictions)
from keras.models import Model
from keras.preprocessing import image
from keras.layers.core import Lambda
from keras.models import Sequential
from tensorflow.python.framework import ops
import keras.backend as K
import tensorflow as tf
import numpy as np
import keras
import sys
import cv2
#from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
#from keras.applications.vgg19 import VGG19, preprocess_input, decode_predictions
#from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions
def target_category_loss(x, category_index, nb_classes):
return tf.multiply(x, K.one_hot([category_index], nb_classes))
def target_category_loss_output_shape(input_shape):
return input_shape
def normalize(x):
# utility function to normalize a tensor by its L2 norm
return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
def load_image(path):
    img_path = path  # use the argument instead of re-reading sys.argv
img = image.load_img(img_path, target_size=(224,224)) #299,299)) #224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
return x
def register_gradient():
if "GuidedBackProp" not in ops._gradient_registry._registry:
@ops.RegisterGradient("GuidedBackProp")
def _GuidedBackProp(op, grad):
dtype = op.inputs[0].dtype
return grad * tf.cast(grad > 0., dtype) * \
tf.cast(op.inputs[0] > 0., dtype)
def compile_saliency_function(model, activation_layer='block5_conv3'): #mixed10 'activation_49' add_16 add_32 activation_98
input_img = model.input
layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
#print(layer_dict)
layer_output = layer_dict[activation_layer].output
max_output = K.max(layer_output, axis=3)
saliency = K.gradients(K.sum(max_output), input_img)[0]
return K.function([input_img, K.learning_phase()], [saliency])
def modify_backprop(model, name):
g = tf.get_default_graph()
with g.gradient_override_map({'Relu': name}):
# get layers that have an activation
layer_dict = [layer for layer in model.layers[1:]
if hasattr(layer, 'activation')]
# replace relu activation
for layer in layer_dict:
if layer.activation == keras.activations.relu:
layer.activation = tf.nn.relu
        # re-instantiate a new model
new_model = VGG16(weights='imagenet')
#new_model = ResNet50(weights='imagenet')
new_model.summary()
return new_model
def deprocess_image(x):
'''
Same normalization as in:
https://github.com/fchollet/keras/blob/master/examples/conv_filter_visualization.py
'''
if np.ndim(x) > 3:
x = np.squeeze(x)
# normalize tensor: center on 0., ensure std is 0.1
x -= x.mean()
x /= (x.std() + 1e-5)
x *= 0.1
# clip to [0, 1]
x += 0.5
x = np.clip(x, 0, 1)
# convert to RGB array
x *= 255
if K.image_dim_ordering() == 'th':
x = x.transpose((1, 2, 0))
x = np.clip(x, 0, 255).astype('uint8')
return x
def _compute_gradients(tensor, var_list):
grads = tf.gradients(tensor, var_list)
return [grad if grad is not None else tf.zeros_like(var) for var, grad in zip(var_list, grads)]
def grad_cam(input_model, image, category_index, layer_name):
nb_classes = 1000
target_layer = lambda x: target_category_loss(x, category_index, nb_classes)
x = Lambda(target_layer, output_shape = target_category_loss_output_shape)(input_model.output)
model = Model(inputs=input_model.input, outputs=x)
#model.summary()
loss = K.sum(model.output)
    conv_output = [l for l in model.layers if l.name == layer_name][0].output
grads = normalize(_compute_gradients(loss, [conv_output])[0])
gradient_function = K.function([model.input], [conv_output, grads])
output, grads_val = gradient_function([image])
output, grads_val = output[0, :], grads_val[0, :, :, :]
weights = np.mean(grads_val, axis = (0, 1))
cam = np.ones(output.shape[0 : 2], dtype = np.float32)
for i, w in enumerate(weights):
cam += w * output[:, :, i]
cam = cv2.resize(cam, (224,224)) #299,299)) #224, 224))
cam = np.maximum(cam, 0)
heatmap = cam / np.max(cam)
#Return to BGR [0..255] from the preprocessed image
image = image[0, :]
image -= np.min(image)
image = np.minimum(image, 255)
cam = cv2.applyColorMap(np.uint8(255*heatmap), cv2.COLORMAP_JET)
cam = np.float32(cam) + np.float32(image)
cam = 255 * cam / np.max(cam)
return np.uint8(cam), heatmap
preprocessed_input = load_image(sys.argv[1])
model = VGG16(weights='imagenet')
#model = VGG19(weights='imagenet')
#model = InceptionV3(weights='imagenet')
#model = ResNet50(weights = 'imagenet')
#model.summary()
target_layer = 'block5_conv3' #'activation_49' add_16 "block5_conv3"
predictions = model.predict(preprocessed_input)
register_gradient()
guided_model = modify_backprop(model, 'GuidedBackProp')
guided_model.summary()
for i in range(5):
top_1 = decode_predictions(predictions)[0][i]
print(predictions.argsort()[0][::-1][i])
print('Predicted class:')
print('%s (%s) with probability %.2f' % (top_1[1], top_1[0], top_1[2]))
predicted_class = predictions.argsort()[0][::-1][i] #np.argmax(predictions)
cam, heatmap = grad_cam(model, preprocessed_input, predicted_class, target_layer)
cv2.imwrite("gradcam"+str(top_1[1])+".jpg", cam)
saliency_fn = compile_saliency_function(guided_model)
saliency = saliency_fn([preprocessed_input, 0])
gradcam = saliency[0] * heatmap[..., np.newaxis]
cv2.imwrite("guided_gradcam"+str(top_1[1])+".jpg", deprocess_image(gradcam))
|
import os
import hashlib
from constants import TEX_DIR
from constants import TEX_TEXT_TO_REPLACE
from constants import TEX_USE_CTEX
from constants import TEX_FIX_SVG
def tex_hash(expression, template_tex_file_body):
id_str = str(expression + template_tex_file_body)
hasher = hashlib.sha256()
hasher.update(id_str.encode())
# Truncating at 16 bytes for cleanliness
return hasher.hexdigest()[:16]
def tex_to_svg_file(expression, template_tex_file_body):
tex_file = generate_tex_file(expression, template_tex_file_body)
dvi_file = tex_to_dvi(tex_file)
return dvi_to_svg(dvi_file)
def generate_tex_file(expression, template_tex_file_body):
result = os.path.join(
TEX_DIR,
tex_hash(expression, template_tex_file_body)
) + ".tex"
if not os.path.exists(result):
print("Writing \"%s\" to %s" % (
"".join(expression), result
))
new_body = template_tex_file_body.replace(
TEX_TEXT_TO_REPLACE, expression
)
with open(result, "w") as outfile:
outfile.write(new_body)
return result
def get_null():
if os.name == "nt":
return "NUL"
return "/dev/null"
def tex_to_dvi(tex_file):
result = tex_file.replace(".tex", ".dvi" if not TEX_USE_CTEX else ".xdv")
if not os.path.exists(result):
commands = [
"latex",
"-interaction=batchmode",
"-halt-on-error",
"-output-directory=" + TEX_DIR,
tex_file,
">",
get_null()
] if not TEX_USE_CTEX else [
"xelatex",
"-no-pdf",
"-interaction=batchmode",
"-halt-on-error",
"-output-directory=" + TEX_DIR,
tex_file,
">",
get_null()
]
exit_code = os.system(" ".join(commands))
if exit_code != 0:
log_file = tex_file.replace(".tex", ".log")
raise Exception(
("Latex error converting to dvi. " if not TEX_USE_CTEX
else "Xelatex error converting to xdv. ") +
"See log output above or the log file: %s" % log_file)
return result
def dvi_to_svg(dvi_file, regen_if_exists=False):
    """
    Converts a dvi (or xdv, when TEX_USE_CTEX is set) file into an svg file
    with the same base name and returns the path to that svg. The conversion
    is skipped if the svg already exists.
    """
result = dvi_file.replace(".dvi" if not TEX_USE_CTEX else ".xdv", ".svg")
if not os.path.exists(result):
commands = [
"dvisvgm",
dvi_file,
"-n",
"-v",
"0",
"-o",
result,
">",
get_null()
]
os.system(" ".join(commands))
if TEX_FIX_SVG:
commands = [
"cairosvg",
result,
"-f",
"svg",
"-o",
result
]
os.system(" ".join(commands))
return result
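# --- Hedged usage sketch (not part of the original file) ---------------------
# End-to-end call; the template body below is illustrative and must contain the
# TEX_TEXT_TO_REPLACE marker (whatever constants defines it to be).
# template = (r"\documentclass{standalone}\begin{document}"
#             + TEX_TEXT_TO_REPLACE +
#             r"\end{document}")
# svg_path = tex_to_svg_file(r"$e^{i\pi} + 1 = 0$", template)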
|
from django.forms.models import model_to_dict
from django.test import TestCase
from ..forms import EmailChangePasswordForm, ProfileForm
from .factories import StaffFactory, UserFactory
class BaseTestProfileForm(TestCase):
def form_data(self, user, **values):
fields = ProfileForm.Meta.fields
data = model_to_dict(user, fields)
data.update(**values)
return data
def submit_form(self, instance, **extra_data):
form = ProfileForm(instance=instance, data=self.form_data(instance, **extra_data))
if form.is_valid():
form.save()
return form
class TestProfileForm(BaseTestProfileForm):
def setUp(self):
self.user = UserFactory()
def test_email_unique(self):
other_user = UserFactory()
form = self.submit_form(self.user, email=other_user.email)
self.assertFalse(form.is_valid())
self.user.refresh_from_db()
self.assertNotEqual(self.user.email, other_user.email)
def test_can_change_email(self):
new_email = 'me@another.com'
self.submit_form(self.user, email=new_email)
self.user.refresh_from_db()
self.assertEqual(self.user.email, new_email)
def test_cant_set_slack_name(self):
slack_name = '@foobar'
self.submit_form(self.user, slack=slack_name)
self.user.refresh_from_db()
self.assertNotEqual(self.user.slack, slack_name)
class TestStaffProfileForm(BaseTestProfileForm):
def setUp(self):
self.staff = StaffFactory()
def test_cant_change_email(self):
new_email = 'me@this.com'
self.submit_form(self.staff, email=new_email)
self.staff.refresh_from_db()
self.assertNotEqual(new_email, self.staff.email)
def test_can_set_slack_name(self):
slack_name = '@foobar'
self.submit_form(self.staff, slack=slack_name)
self.staff.refresh_from_db()
self.assertEqual(self.staff.slack, slack_name)
    def test_can_set_slack_name_with_trailing_space(self):
        slack_name = '@foobar '
        self.submit_form(self.staff, slack=slack_name)
        self.staff.refresh_from_db()
        self.assertEqual(self.staff.slack, slack_name.strip())
def test_cant_set_slack_name_with_space(self):
slack_name = '@ foobar'
form = self.submit_form(self.staff, slack=slack_name)
self.assertFalse(form.is_valid())
self.staff.refresh_from_db()
self.assertNotEqual(self.staff.slack, slack_name)
def test_auto_prepend_at(self):
slack_name = 'foobar'
self.submit_form(self.staff, slack=slack_name)
self.staff.refresh_from_db()
self.assertEqual(self.staff.slack, '@' + slack_name)
def test_can_clear_slack_name(self):
slack_name = ''
self.submit_form(self.staff, slack=slack_name)
self.staff.refresh_from_db()
self.assertEqual(self.staff.slack, slack_name)
class TestEmailChangePasswordForm(TestCase):
def setUp(self):
self.user = UserFactory()
def test_doesnt_error_on_null_slack_field(self):
form = EmailChangePasswordForm(self.user)
form.save('', '', None)
def test_can_update_slack(self):
slack_name = 'foobar'
form = EmailChangePasswordForm(self.user)
form.save('', '', slack_name)
self.assertEqual(self.user.slack, slack_name)
|
from daoRefactor2 import DAO
from rssTickerInfo import rssTickerInfo
import json
import boto3
table = 'CompanyRSSFeed'
dao = DAO(table)
def main():
tickerValues = dao.getRssTickerValues('UNP')
print(tickerValues)
if __name__ == "__main__":
# calling main function
main()
|
##############################################################################
# Copyright (c) 2015 Orange
# guyrodrigue.koffi@orange.com / koffirodrigue@gmail.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from opnfv_testapi.tornado_swagger import swagger
@swagger.model()
class ProjectCreateRequest(object):
def __init__(self, name, description=''):
self.name = name
self.description = description
def format(self):
return {
"name": self.name,
"description": self.description,
}
@swagger.model()
class ProjectUpdateRequest(object):
def __init__(self, name='', description=''):
self.name = name
self.description = description
def format(self):
return {
"name": self.name,
"description": self.description,
}
@swagger.model()
class Project(object):
def __init__(self,
name=None, _id=None, description=None, create_date=None):
self._id = _id
self.name = name
self.description = description
self.creation_date = create_date
@staticmethod
def from_dict(res_dict):
if res_dict is None:
return None
t = Project()
t._id = res_dict.get('_id')
t.creation_date = res_dict.get('creation_date')
t.name = res_dict.get('name')
t.description = res_dict.get('description')
return t
def format(self):
return {
"name": self.name,
"description": self.description,
"creation_date": str(self.creation_date)
}
def format_http(self):
return {
"_id": str(self._id),
"name": self.name,
"description": self.description,
"creation_date": str(self.creation_date),
}
@swagger.model()
class Projects(object):
"""
@property projects:
@ptype projects: C{list} of L{Project}
"""
def __init__(self):
self.projects = list()
@staticmethod
def from_dict(res_dict):
if res_dict is None:
return None
res = Projects()
for project in res_dict.get('projects'):
res.projects.append(Project.from_dict(project))
return res
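# Example round trip (a sketch for illustration; the dict keys mirror those read by
# Project.from_dict above, so a create-request payload can seed a Project):
#
#     payload = ProjectCreateRequest('functest', 'functional testing').format()
#     project = Project.from_dict(payload)
#     assert project.format()['name'] == 'functest'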
|
"""Random walk routines
"""
from .._ffi.function import _init_api
from .. import backend as F
from ..base import DGLError
from .. import ndarray as nd
from .. import utils
__all__ = [
'random_walk',
'pack_traces']
def random_walk(g, nodes, *, metapath=None, length=None, prob=None, restart_prob=None):
"""Generate random walk traces from an array of starting nodes based on the given metapath.
    One trace is generated per starting node. A trace would
1. Start from the given node and set ``t`` to 0.
2. Pick and traverse along edge type ``metapath[t]`` from the current node.
3. If no edge can be found, halt. Otherwise, increment ``t`` and go to step 2.
The returned traces all have length ``len(metapath) + 1``, where the first node
is the starting node itself.
If a random walk stops in advance, DGL pads the trace with -1 to have the same
length.
Parameters
----------
g : DGLGraph
The graph. Must be on CPU.
nodes : Tensor
Node ID tensor from which the random walk traces starts.
The tensor must be on CPU, and must have the same dtype as the ID type
of the graph.
metapath : list[str or tuple of str], optional
Metapath, specified as a list of edge types.
Mutually exclusive with :attr:`length`.
If omitted, DGL assumes that ``g`` only has one node & edge type. In this
case, the argument ``length`` specifies the length of random walk traces.
length : int, optional
Length of random walks.
Mutually exclusive with :attr:`metapath`.
Only used when :attr:`metapath` is None.
prob : str, optional
The name of the edge feature tensor on the graph storing the (unnormalized)
probabilities associated with each edge for choosing the next node.
The feature tensor must be non-negative and the sum of the probabilities
must be positive for the outbound edges of all nodes (although they don't have
to sum up to one). The result will be undefined otherwise.
If omitted, DGL assumes that the neighbors are picked uniformly.
restart_prob : float or Tensor, optional
Probability to terminate the current trace before each transition.
If a tensor is given, :attr:`restart_prob` should have the same length as
:attr:`metapath` or :attr:`length`.
Returns
-------
traces : Tensor
A 2-dimensional node ID tensor with shape ``(num_seeds, len(metapath) + 1)`` or
``(num_seeds, length + 1)`` if :attr:`metapath` is None.
types : Tensor
A 1-dimensional node type ID tensor with shape ``(len(metapath) + 1)`` or
``(length + 1)``.
The type IDs match the ones in the original graph ``g``.
Notes
-----
The returned tensors are on CPU.
Examples
--------
The following creates a homogeneous graph:
>>> g1 = dgl.graph([(0, 1), (1, 2), (1, 3), (2, 0), (3, 0)], 'user', 'follow')
Normal random walk:
>>> dgl.sampling.random_walk(g1, [0, 1, 2, 0], length=4)
(tensor([[0, 1, 2, 0, 1],
[1, 3, 0, 1, 3],
[2, 0, 1, 3, 0],
[0, 1, 2, 0, 1]]), tensor([0, 0, 0, 0, 0]))
The first tensor indicates the random walk path for each seed node.
The j-th element in the second tensor indicates the node type ID of the j-th node
in every path. In this case, it is returning all 0 (``user``).
Random walk with restart:
    >>> dgl.sampling.random_walk(g1, [0, 1, 2, 0], length=4, restart_prob=0.5)
(tensor([[ 0, -1, -1, -1, -1],
[ 1, 3, 0, -1, -1],
[ 2, -1, -1, -1, -1],
[ 0, -1, -1, -1, -1]]), tensor([0, 0, 0, 0, 0]))
Non-uniform random walk:
>>> g1.edata['p'] = torch.FloatTensor([1, 0, 1, 1, 1]) # disallow going from 1 to 2
>>> dgl.sampling.random_walk(g1, [0, 1, 2, 0], length=4, prob='p')
(tensor([[0, 1, 3, 0, 1],
[1, 3, 0, 1, 3],
[2, 0, 1, 3, 0],
[0, 1, 3, 0, 1]]), tensor([0, 0, 0, 0, 0]))
Metapath-based random walk:
>>> g2 = dgl.heterograph({
... ('user', 'follow', 'user'): [(0, 1), (1, 2), (1, 3), (2, 0), (3, 0)],
... ('user', 'view', 'item'): [(0, 0), (0, 1), (1, 1), (2, 2), (3, 2), (3, 1)],
... ('item', 'viewed-by', 'user'): [(0, 0), (1, 0), (1, 1), (2, 2), (2, 3), (1, 3)]})
>>> dgl.sampling.random_walk(
... g2, [0, 1, 2, 0], metapath=['follow', 'view', 'viewed-by'] * 2)
(tensor([[0, 1, 1, 1, 2, 2, 3],
[1, 3, 1, 1, 2, 2, 2],
[2, 0, 1, 1, 3, 1, 1],
[0, 1, 1, 0, 1, 1, 3]]), tensor([0, 0, 1, 0, 0, 1, 0]))
Metapath-based random walk, with restarts only on items (i.e. after traversing a "view"
relationship):
>>> dgl.sampling.random_walk(
... g2, [0, 1, 2, 0], metapath=['follow', 'view', 'viewed-by'] * 2,
... restart_prob=torch.FloatTensor([0, 0.5, 0, 0, 0.5, 0]))
(tensor([[ 0, 1, -1, -1, -1, -1, -1],
[ 1, 3, 1, 0, 1, 1, 0],
[ 2, 0, 1, 1, 3, 2, 2],
[ 0, 1, 1, 3, 0, 0, 0]]), tensor([0, 0, 1, 0, 0, 1, 0]))
"""
assert g.device == F.cpu(), "Graph must be on CPU."
n_etypes = len(g.canonical_etypes)
n_ntypes = len(g.ntypes)
if metapath is None:
if n_etypes > 1 or n_ntypes > 1:
raise DGLError("metapath not specified and the graph is not homogeneous.")
if length is None:
raise ValueError("Please specify either the metapath or the random walk length.")
metapath = [0] * length
else:
metapath = [g.get_etype_id(etype) for etype in metapath]
gidx = g._graph
nodes = F.to_dgl_nd(utils.prepare_tensor(g, nodes, 'nodes'))
metapath = F.to_dgl_nd(utils.prepare_tensor(g, metapath, 'metapath'))
# Load the probability tensor from the edge frames
if prob is None:
p_nd = [nd.array([], ctx=nodes.ctx) for _ in g.canonical_etypes]
else:
p_nd = []
for etype in g.canonical_etypes:
if prob in g.edges[etype].data:
prob_nd = F.to_dgl_nd(g.edges[etype].data[prob])
if prob_nd.ctx != nodes.ctx:
raise ValueError(
'context of seed node array and edges[%s].data[%s] are different' %
(etype, prob))
else:
prob_nd = nd.array([], ctx=nodes.ctx)
p_nd.append(prob_nd)
# Actual random walk
if restart_prob is None:
traces, types = _CAPI_DGLSamplingRandomWalk(gidx, nodes, metapath, p_nd)
elif F.is_tensor(restart_prob):
restart_prob = F.to_dgl_nd(restart_prob)
traces, types = _CAPI_DGLSamplingRandomWalkWithStepwiseRestart(
gidx, nodes, metapath, p_nd, restart_prob)
else:
traces, types = _CAPI_DGLSamplingRandomWalkWithRestart(
gidx, nodes, metapath, p_nd, restart_prob)
traces = F.from_dgl_nd(traces)
types = F.from_dgl_nd(types)
return traces, types
def pack_traces(traces, types):
"""Pack the padded traces returned by ``random_walk()`` into a concatenated array.
The padding values (-1) are removed, and the length and offset of each trace is
returned along with the concatenated node ID and node type arrays.
Parameters
----------
traces : Tensor
A 2-dimensional node ID tensor. Must be on CPU and either ``int32`` or ``int64``.
types : Tensor
A 1-dimensional node type ID tensor. Must be on CPU and either ``int32`` or ``int64``.
Returns
-------
concat_vids : Tensor
An array of all node IDs concatenated and padding values removed.
concat_types : Tensor
An array of node types corresponding for each node in ``concat_vids``.
Has the same length as ``concat_vids``.
lengths : Tensor
Length of each trace in the original traces tensor.
offsets : Tensor
        Offset of each trace in the original traces tensor in the new concatenated tensor.
Notes
-----
The returned tensors are on CPU.
Examples
--------
>>> g2 = dgl.heterograph({
... ('user', 'follow', 'user'): [(0, 1), (1, 2), (1, 3), (2, 0), (3, 0)],
... ('user', 'view', 'item'): [(0, 0), (0, 1), (1, 1), (2, 2), (3, 2), (3, 1)],
... ('item', 'viewed-by', 'user'): [(0, 0), (1, 0), (1, 1), (2, 2), (2, 3), (1, 3)]})
>>> traces, types = dgl.sampling.random_walk(
... g2, [0, 0], metapath=['follow', 'view', 'viewed-by'] * 2,
... restart_prob=torch.FloatTensor([0, 0.5, 0, 0, 0.5, 0]))
>>> traces, types
(tensor([[ 0, 1, -1, -1, -1, -1, -1],
[ 0, 1, 1, 3, 0, 0, 0]]), tensor([0, 0, 1, 0, 0, 1, 0]))
>>> concat_vids, concat_types, lengths, offsets = dgl.sampling.pack_traces(traces, types)
>>> concat_vids
tensor([0, 1, 0, 1, 1, 3, 0, 0, 0])
>>> concat_types
tensor([0, 0, 0, 0, 1, 0, 0, 1, 0])
>>> lengths
tensor([2, 7])
>>> offsets
    tensor([0, 2])
The first tensor ``concat_vids`` is the concatenation of all paths, i.e. flattened array
of ``traces``, excluding all padding values (-1).
The second tensor ``concat_types`` stands for the node type IDs of all corresponding nodes
in the first tensor.
The third and fourth tensor indicates the length and the offset of each path. With these
tensors it is easy to obtain the i-th random walk path with:
>>> vids = concat_vids.split(lengths.tolist())
    >>> vtypes = concat_types.split(lengths.tolist())
>>> vids[1], vtypes[1]
(tensor([0, 1, 1, 3, 0, 0, 0]), tensor([0, 0, 1, 0, 0, 1, 0]))
"""
assert F.is_tensor(traces) and F.context(traces) == F.cpu(), "traces must be a CPU tensor"
assert F.is_tensor(types) and F.context(types) == F.cpu(), "types must be a CPU tensor"
traces = F.to_dgl_nd(traces)
types = F.to_dgl_nd(types)
concat_vids, concat_types, lengths, offsets = _CAPI_DGLSamplingPackTraces(traces, types)
concat_vids = F.from_dgl_nd(concat_vids)
concat_types = F.from_dgl_nd(concat_types)
lengths = F.from_dgl_nd(lengths)
offsets = F.from_dgl_nd(offsets)
return concat_vids, concat_types, lengths, offsets
_init_api('dgl.sampling.randomwalks', __name__)
|
import vdomr as vd
import spikeforest as sf
from cairio import client as ca
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
class AccuracyPlot(vd.components.Pyplot):
def __init__(self, snrs, accuracies):
vd.components.Pyplot.__init__(self)
self._snrs = snrs
self._accuracies = accuracies
def plot(self):
plt.scatter(self._snrs, self._accuracies)
class StudySorterFigure(vd.Component):
def __init__(self, sfdata):
vd.Component.__init__(self)
self._plot = None
self._SF_data = sfdata
self._study = None
self._sorter = None
def setStudySorter(self, *, study, sorter):
self._study = study
self._sorter = sorter
self._update_plot()
def _update_plot(self):
SF = self._SF_data
study = SF.study(self._study)
b = _get_study_sorting_results(study)
a = b[self._sorter]
snrs = a['true_unit_snrs']
accuracies = a['num_matches'] / \
(a['num_matches']+a['num_false_positives']+a['num_false_negatives'])
self._plot = AccuracyPlot(snrs, accuracies)
self.refresh()
def render(self):
if self._plot is None:
return vd.div('Nothing')
return vd.div(
vd.div('test '+self._study+' '+self._sorter),
self._plot
)
class SFBrowser(vd.Component):
def __init__(self, output_id):
vd.Component.__init__(self)
self._output_id = output_id
a = ca.loadObject(
key=dict(name='spikeforest_results'),
subkey=output_id
)
if not a:
print('ERROR: unable to open results: '+output_id)
return
if ('recordings' not in a) or ('studies' not in a) or ('sorting_results' not in a):
print('ERROR: problem with output: '+output_id)
return
studies = a['studies']
recordings = a['recordings']
sorting_results = a['sorting_results']
SF = sf.SFData()
SF.loadStudies(studies)
SF.loadRecordings2(recordings)
SF.loadSortingResults(sorting_results)
# sorter_names=[]
# for SR in sorting_results:
# sorter_names.append(SR['sorter']['name'])
# sorter_names=list(set(sorter_names))
# sorter_names.sort()
self._SF_data = SF
self._accuracy_threshold_input = vd.components.LineEdit(
value=0.8, dtype=float, style=dict(width='70px'))
self._update_button = vd.components.Button(
onclick=self._on_update, class_='button', label='Update')
self._study_sorter_fig = StudySorterFigure(SF)
self._study_sorter_table = vd.div() # dummy
vd.devel.loadBootstrap()
self._update_accuracy_table()
def _on_update(self):
self._update_accuracy_table()
def _update_accuracy_table(self):
accuracy_threshold = self._accuracy_threshold_input.value()
self._accuracy_table_data, self._sorters = self._get_accuracy_table_data(
accuracy_threshold=accuracy_threshold)
self._accuracy_table = self._to_table(
self._accuracy_table_data, ['study']+self._sorters)
print(self._accuracy_table_data)
self.refresh()
def _open_study_sorter_fig(self, *, sorter, study):
self._study_sorter_fig.setStudySorter(study=study, sorter=sorter)
def _get_accuracy_table_data(self, *, accuracy_threshold):
SF = self._SF_data
accuracy_table = []
sorters = set()
for sname in SF.studyNames():
print('STUDY: '+sname)
study = SF.study(sname)
b = _get_study_sorting_results(study)
tmp = dict(
study=dict( # first column
text=sname
)
)
for sorter in b:
sorters.add(sorter)
a = b[sorter]
accuracies = a['num_matches'] / \
(a['num_matches']+a['num_false_positives'] +
a['num_false_negatives'])
tmp[sorter] = dict(
text=str(np.count_nonzero(
accuracies >= accuracy_threshold)),
callback=lambda sorter=sorter, study=sname: self._open_study_sorter_fig(
sorter=sorter, study=study)
)
accuracy_table.append(tmp)
sorters = list(sorters)
sorters.sort()
return accuracy_table, sorters
def _to_table(self, X, column_names):
rows = []
rows.append(vd.tr([vd.th(cname) for cname in column_names]))
for x in X:
elmts = []
for cname in column_names:
tmp = x.get(cname)
if tmp:
if 'callback' in tmp:
elmt = vd.a(tmp['text'], onclick=tmp['callback'])
else:
elmt = vd.span(tmp['text'])
else:
elmt = vd.span('N/A')
elmts.append(elmt)
rows.append(vd.tr([vd.td(elmt) for elmt in elmts]))
return vd.table(rows, class_='table')
def render(self):
return vd.div(
vd.table(
vd.tr(
vd.td('Accuracy threshold:'),
vd.td(self._accuracy_threshold_input),
vd.td(self._update_button)
),
class_='table',
style={'max-width': '200px'}
),
vd.components.ScrollArea(
self._accuracy_table,
height=500
),
self._study_sorter_fig,
style=dict(padding='15px')
)
def _get_study_sorting_results(study):
results = []
for rname in study.recordingNames():
rec = study.recording(rname)
true_units_info = rec.trueUnitsInfo(format='json')
true_units_info_by_id = dict()
for true_unit in true_units_info:
true_units_info_by_id[true_unit['unit_id']] = true_unit
for srname in rec.sortingResultNames():
a = rec.sortingResult(srname)
res0 = dict(sorter=srname, recording=rname, study=study.name())
tmp = a.comparisonWithTruth(format='json')
for i in tmp:
tmp[i]['true_unit_info'] = true_units_info_by_id[tmp[i]['unit_id']]
res0['comparison_with_truth'] = tmp
results.append(res0)
sorters = list(set([a['sorter'] for a in results]))
sorters.sort()
units_by_sorter = dict()
for sorter in sorters:
units_by_sorter[sorter] = []
for obj in results:
sorter0 = obj['sorter']
units = [obj['comparison_with_truth'][i]
for i in obj['comparison_with_truth']]
units_by_sorter[sorter0] = units_by_sorter[sorter0]+units
ret = dict()
for sorter in sorters:
units = units_by_sorter[sorter]
try:
ret[sorter] = dict(
true_unit_ids=[unit['unit_id'] for unit in units],
true_unit_snrs=np.array(
[unit['true_unit_info']['snr'] for unit in units]),
true_unit_firing_rates=np.array(
[unit['true_unit_info']['firing_rate'] for unit in units]),
num_matches=np.array([unit['num_matches'] for unit in units]),
num_false_positives=np.array(
[unit['num_false_positives'] for unit in units]),
num_false_negatives=np.array(
[unit['num_false_negatives'] for unit in units])
)
        except Exception:
print('WARNING: Problem loading results for sorter: '+sorter)
ret[sorter] = dict(
true_unit_ids=[],
true_unit_snrs=np.array([]),
true_unit_firing_rates=np.array([]),
num_matches=np.array([]),
num_false_positives=np.array([]),
num_false_negatives=np.array([])
)
return ret
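# Worked example of the per-unit accuracy formula used by AccuracyPlot and the
# accuracy table above (a sketch with made-up counts):
#
#     a = dict(num_matches=np.array([9, 5]),
#              num_false_positives=np.array([1, 0]),
#              num_false_negatives=np.array([0, 5]))
#     accuracies = a['num_matches'] / (
#         a['num_matches'] + a['num_false_positives'] + a['num_false_negatives'])
#     # -> array([0.9, 0.5])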
|
import sys
import unittest
from unittest import mock
from warnings import catch_warnings
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.item import ABCMeta, DictItem, Field, Item, ItemMeta
PY36_PLUS = sys.version_info >= (3, 6)
class ItemTest(unittest.TestCase):
def assertSortedEqual(self, first, second, msg=None):
return self.assertEqual(sorted(first), sorted(second), msg)
def test_simple(self):
class TestItem(Item):
name = Field()
i = TestItem()
i['name'] = u'name'
self.assertEqual(i['name'], u'name')
def test_init(self):
class TestItem(Item):
name = Field()
i = TestItem()
self.assertRaises(KeyError, i.__getitem__, 'name')
i2 = TestItem(name=u'john doe')
self.assertEqual(i2['name'], u'john doe')
i3 = TestItem({'name': u'john doe'})
self.assertEqual(i3['name'], u'john doe')
i4 = TestItem(i3)
self.assertEqual(i4['name'], u'john doe')
self.assertRaises(KeyError, TestItem, {'name': u'john doe',
'other': u'foo'})
def test_invalid_field(self):
class TestItem(Item):
pass
i = TestItem()
self.assertRaises(KeyError, i.__setitem__, 'field', 'text')
self.assertRaises(KeyError, i.__getitem__, 'field')
def test_repr(self):
class TestItem(Item):
name = Field()
number = Field()
i = TestItem()
i['name'] = u'John Doe'
i['number'] = 123
itemrepr = repr(i)
self.assertEqual(itemrepr,
"{'name': 'John Doe', 'number': 123}")
i2 = eval(itemrepr)
self.assertEqual(i2['name'], 'John Doe')
self.assertEqual(i2['number'], 123)
def test_private_attr(self):
class TestItem(Item):
name = Field()
i = TestItem()
i._private = 'test'
self.assertEqual(i._private, 'test')
def test_raise_getattr(self):
class TestItem(Item):
name = Field()
i = TestItem()
self.assertRaises(AttributeError, getattr, i, 'name')
def test_raise_setattr(self):
class TestItem(Item):
name = Field()
i = TestItem()
self.assertRaises(AttributeError, setattr, i, 'name', 'john')
def test_custom_methods(self):
class TestItem(Item):
name = Field()
def get_name(self):
return self['name']
def change_name(self, name):
self['name'] = name
i = TestItem()
self.assertRaises(KeyError, i.get_name)
i['name'] = u'lala'
self.assertEqual(i.get_name(), u'lala')
i.change_name(u'other')
self.assertEqual(i.get_name(), 'other')
def test_metaclass(self):
class TestItem(Item):
name = Field()
keys = Field()
values = Field()
i = TestItem()
i['name'] = u'John'
self.assertEqual(list(i.keys()), ['name'])
self.assertEqual(list(i.values()), ['John'])
i['keys'] = u'Keys'
i['values'] = u'Values'
self.assertSortedEqual(list(i.keys()), ['keys', 'values', 'name'])
self.assertSortedEqual(list(i.values()), [u'Keys', u'Values', u'John'])
def test_metaclass_with_fields_attribute(self):
class TestItem(Item):
fields = {'new': Field(default='X')}
item = TestItem(new=u'New')
self.assertSortedEqual(list(item.keys()), ['new'])
self.assertSortedEqual(list(item.values()), [u'New'])
def test_metaclass_inheritance(self):
class BaseItem(Item):
name = Field()
keys = Field()
values = Field()
class TestItem(BaseItem):
keys = Field()
i = TestItem()
i['keys'] = 3
self.assertEqual(list(i.keys()), ['keys'])
self.assertEqual(list(i.values()), [3])
def test_metaclass_multiple_inheritance_simple(self):
class A(Item):
fields = {'load': Field(default='A')}
save = Field(default='A')
class B(A):
pass
class C(Item):
fields = {'load': Field(default='C')}
save = Field(default='C')
class D(B, C):
pass
item = D(save='X', load='Y')
self.assertEqual(item['save'], 'X')
self.assertEqual(item['load'], 'Y')
self.assertEqual(D.fields, {'load': {'default': 'A'},
'save': {'default': 'A'}})
# D class inverted
class E(C, B):
pass
self.assertEqual(E(save='X')['save'], 'X')
self.assertEqual(E(load='X')['load'], 'X')
self.assertEqual(E.fields, {'load': {'default': 'C'},
'save': {'default': 'C'}})
def test_metaclass_multiple_inheritance_diamond(self):
class A(Item):
fields = {'update': Field(default='A')}
save = Field(default='A')
load = Field(default='A')
class B(A):
pass
class C(A):
fields = {'update': Field(default='C')}
save = Field(default='C')
class D(B, C):
fields = {'update': Field(default='D')}
load = Field(default='D')
self.assertEqual(D(save='X')['save'], 'X')
self.assertEqual(D(load='X')['load'], 'X')
self.assertEqual(D.fields, {'save': {'default': 'C'},
'load': {'default': 'D'}, 'update': {'default': 'D'}})
# D class inverted
class E(C, B):
load = Field(default='E')
self.assertEqual(E(save='X')['save'], 'X')
self.assertEqual(E(load='X')['load'], 'X')
self.assertEqual(E.fields, {'save': {'default': 'C'},
'load': {'default': 'E'}, 'update': {'default': 'C'}})
def test_metaclass_multiple_inheritance_without_metaclass(self):
class A(Item):
fields = {'load': Field(default='A')}
save = Field(default='A')
class B(A):
pass
class C(object):
fields = {'load': Field(default='C')}
not_allowed = Field(default='not_allowed')
save = Field(default='C')
class D(B, C):
pass
self.assertRaises(KeyError, D, not_allowed='value')
self.assertEqual(D(save='X')['save'], 'X')
self.assertEqual(D.fields, {'save': {'default': 'A'},
'load': {'default': 'A'}})
# D class inverted
class E(C, B):
pass
self.assertRaises(KeyError, E, not_allowed='value')
self.assertEqual(E(save='X')['save'], 'X')
self.assertEqual(E.fields, {'save': {'default': 'A'},
'load': {'default': 'A'}})
def test_to_dict(self):
class TestItem(Item):
name = Field()
i = TestItem()
i['name'] = u'John'
self.assertEqual(dict(i), {'name': u'John'})
def test_copy(self):
class TestItem(Item):
name = Field()
item = TestItem({'name': 'lower'})
copied_item = item.copy()
self.assertNotEqual(id(item), id(copied_item))
copied_item['name'] = copied_item['name'].upper()
self.assertNotEqual(item['name'], copied_item['name'])
def test_deepcopy(self):
class TestItem(Item):
tags = Field()
item = TestItem({'tags': ['tag1']})
copied_item = item.deepcopy()
item['tags'].append('tag2')
assert item['tags'] != copied_item['tags']
def test_dictitem_deprecation_warning(self):
"""Make sure the DictItem deprecation warning is not issued for
Item"""
with catch_warnings(record=True) as warnings:
item = Item()
self.assertEqual(len(warnings), 0)
class SubclassedItem(Item):
pass
subclassed_item = SubclassedItem()
self.assertEqual(len(warnings), 0)
class ItemMetaTest(unittest.TestCase):
def test_new_method_propagates_classcell(self):
new_mock = mock.Mock(side_effect=ABCMeta.__new__)
base = ItemMeta.__bases__[0]
with mock.patch.object(base, '__new__', new_mock):
class MyItem(Item):
if not PY36_PLUS:
# This attribute is an internal attribute in Python 3.6+
# and must be propagated properly. See
# https://docs.python.org/3.6/reference/datamodel.html#creating-the-class-object
# In <3.6, we add a dummy attribute just to ensure the
# __new__ method propagates it correctly.
__classcell__ = object()
def f(self):
# For rationale of this see:
# https://github.com/python/cpython/blob/ee1a81b77444c6715cbe610e951c655b6adab88b/Lib/test/test_super.py#L222
return __class__ # noqa https://github.com/scrapy/scrapy/issues/2836
MyItem()
(first_call, second_call) = new_mock.call_args_list[-2:]
mcs, class_name, bases, attrs = first_call[0]
assert '__classcell__' not in attrs
mcs, class_name, bases, attrs = second_call[0]
assert '__classcell__' in attrs
class ItemMetaClassCellRegression(unittest.TestCase):
def test_item_meta_classcell_regression(self):
class MyItem(Item, metaclass=ItemMeta):
def __init__(self, *args, **kwargs):
# This call to super() trigger the __classcell__ propagation
# requirement. When not done properly raises an error:
# TypeError: __class__ set to <class '__main__.MyItem'>
# defining 'MyItem' as <class '__main__.MyItem'>
super(MyItem, self).__init__(*args, **kwargs)
class DictItemTest(unittest.TestCase):
def test_deprecation_warning(self):
with catch_warnings(record=True) as warnings:
dict_item = DictItem()
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0].category, ScrapyDeprecationWarning)
with catch_warnings(record=True) as warnings:
class SubclassedDictItem(DictItem):
pass
subclassed_dict_item = SubclassedDictItem()
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0].category, ScrapyDeprecationWarning)
if __name__ == "__main__":
unittest.main()
|
"""
A CapitalT class and methods that use the Cross class.
Authors: David Mutchler, Vibha Alangar, Dave Fisher, Amanda Stouder,
their colleagues and Xiaolong Chen (Harry).
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
import math
def main():
"""
Calls the test functions.
As you implement CapitalT method uncomment the appropriate tests.
"""
# --------------------------------------------------------------
# Uncomment only 1 test at a time as you develop your code.
# --------------------------------------------------------------
print('Un-comment the calls in MAIN one by one')
print(' to run the testing code as you complete the TODOs.')
run_test_simple_t()
run_test_set_colors()
run_test_move_by()
run_test_clone()
def run_test_simple_t():
"""
Tests for the __init__ method and attach_to method.
See the simple_t PDF for expected output.
"""
print()
print('--------------------------------------------------')
print('Testing __init__ and attach_to ')
print('--------------------------------------------------')
window = rg.RoseWindow(600, 400, 'Test 1 - Simple Ts')
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
print("Expected: Point(250.0, 40.0) Point(350.0, 60.0)")
print("Actual: ", t1.h_rect.get_upper_left_corner(), t1.h_rect.get_lower_right_corner())
print("Expected: Point(290.0, 40.0) Point(310.0, 240.0)")
print("Actual: ", t1.v_rect.get_upper_left_corner(), t1.v_rect.get_lower_right_corner())
t1.attach_to(window)
t2 = CapitalT(rg.Point(150, 150), 100, 150, 40)
t2.attach_to(window)
t3 = CapitalT(rg.Point(450, 150), 10, 15, 4)
t3.attach_to(window)
window.render()
print("See graphics window and compare to the simple_t PDF")
window.close_on_mouse_click()
def run_test_set_colors():
""" Tests for the set_colors method. See the set_colors PDF for expected output. """
window = rg.RoseWindow(600, 400, 'Test 2 - Colorful Ts')
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.set_colors('red', 'magenta')
t1.attach_to(window)
t2 = CapitalT(rg.Point(150, 150), 100, 150, 40)
t2.set_colors('green', 'purple')
t2.attach_to(window)
t3 = CapitalT(rg.Point(450, 150), 10, 15, 4)
t3.set_colors('blue', 'gray')
t3.attach_to(window)
window.render()
window.close_on_mouse_click()
def run_test_move_by():
""" Tests for the move_by method. See the move_by PDF for expected output. """
window = rg.RoseWindow(600, 400, 'Test 3 - Moving T')
little_red_t = CapitalT(rg.Point(300, 50), 60, 80, 5)
little_red_t.set_colors('red', 'gray')
little_red_t.attach_to(window)
window.render(0.5)
little_red_t.move_by(0, 100)
window.render(0.5)
little_red_t.move_by(0, 100)
window.render(0.5)
for k in range(40):
little_red_t.move_by(5, -2)
window.render(0.05)
window.close_on_mouse_click()
def run_test_clone():
""" Tests for the clone method. See the clone PDF for expected output. """
window = rg.RoseWindow(650, 400, 'Test 4 - Cloning Ts')
first_t = CapitalT(rg.Point(75, 50), 80, 80, 40)
first_t.set_colors('blue', 'cyan')
for k in range(6):
t = first_t.clone()
if k < 2:
t.set_colors('white', 'black')
t.move_by(100 * k, 20 * k)
t.attach_to(window)
first_t.move_by(0, 200)
first_t.attach_to(window)
window.render()
window.close_on_mouse_click()
########################################################################
# The CapitalT class (and its methods) begins here.
########################################################################
class CapitalT(object):
"""
Manages a CapitalT graphics object which is made up of two rectangles.
    See the PDFs, especially dimensions.pdf, to help you understand this.
"""
def __init__(self, intersection_center, width, height, letter_thickness):
"""
What comes in:
-- self
-- an rg.Point for the intersection center of the CapitalT
-- This point is also center of the horizontal rectangle.
        -- an int for the width of the CapitalT (the width of the horizontal rectangle)
        -- an int for the height of the CapitalT (the height of the vertical rectangle)
        -- an int for the thickness of each rectangle (the letter's thickness)
What goes out: Nothing (i.e., None).
Side effects: Sets two instance variables named:
-- h_rect (to represent the horizontal rectangle in the T, the top bar)
-- v_rect (to represent the vertical rectangle in the T, the | part of the T)
*** See the dimensions PDF for the exact placement of the rectangles in the T. ***
Each rectangle is an rg.Rectangle. Unlike prior modules you are NOT
allowed to make any other instance variables. You may only use
exactly these two and must figure out how to do the problem with ONLY
those two instance variables.
Example:
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
          -- t1.h_rect would have an upper left corner of (250, 40)
          -- t1.h_rect would have a lower right corner of (350, 60)
          -- t1.v_rect would have an upper left corner of (290, 40)
          -- t1.v_rect would have a lower right corner of (310, 240)
Type hints:
:type intersection_center: rg.Point
:type width: int
:type height: int
:type letter_thickness: int
"""
        hupperright = rg.Point(intersection_center.x + width / 2,
                               intersection_center.y - letter_thickness / 2)
        hlowerleft = rg.Point(intersection_center.x - width / 2,
                              intersection_center.y + letter_thickness / 2)
        self.h_rect = rg.Rectangle(hupperright, hlowerleft)
        vupperright = rg.Point(intersection_center.x + letter_thickness / 2,
                               hupperright.y)
        vlowerleft = rg.Point(intersection_center.x - letter_thickness / 2,
                              hlowerleft.y + (height - letter_thickness))
        self.v_rect = rg.Rectangle(vupperright, vlowerleft)
# --------------------------------------------------------------
# DONE: 3.
# READ the above specification, including the Example.
# Implement this method
# Note: you will need to also implement attach_to before testing
# --------------------------------------------------------------
def attach_to(self, window):
"""
What comes in:
-- self
-- an rg.RoseWindow
What goes out: Nothing (i.e., None).
Side effects:
-- Attaches both instance rectangles to the given window.
-- Hint: Attach h_rect second to make it draw in front of v_rect
Example:
window = rg.RoseWindow()
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.attach_to(window)
Type hints:
:type window: rg.RoseWindow
"""
self.v_rect.attach_to(window)
self.h_rect.attach_to(window)
# --------------------------------------------------------------
# DONE: 4.
# READ the above specification, including the Example.
# Implement and test this method by looking at the console and
# the graphics window (compare it to simple_t.pdf)
# --------------------------------------------------------------
def set_colors(self, fill_color, outline_color):
"""
What comes in:
-- self
-- a string that represents a valid rosegraphics color
-- a string that represents a valid rosegraphics color
What goes out: Nothing (i.e., None).
Side effects:
-- sets the fill_color of both rectangles to the given fill color
-- sets the outline_color of both rectangles to the given outline color
Example:
window = rg.RoseWindow()
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.set_color('red', 'blue')
Type hints:
:type fill_color: str
:type outline_color: str
"""
        self.h_rect.fill_color = fill_color
        self.v_rect.fill_color = fill_color
        self.h_rect.outline_color = outline_color
        self.v_rect.outline_color = outline_color
# --------------------------------------------------------------
# DONE: 5.
# READ the above specification, including the Example.
# Implement and test this method by uncommenting the appropriate
# run_test method in main. Compare the graphics window to
# set_colors.pdf.
# --------------------------------------------------------------
def move_by(self, dx, dy):
"""
What comes in:
-- self
-- an int amount to move in the x direction
-- an int amount to move in the y direction
What goes out: Nothing (i.e., None).
Side effects:
-- Moves both h_rect and v_rect the specified dx and dy amounts.
Example:
window = rg.RoseWindow()
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.attach_to(window)
window.render(0.5)
t1.move_by(100, 200) # Moves the T 100 pixels right and 200 down.
window.render() # necessary to see the change
Type hints:
:type dx: int
:type dy: int
"""
        self.h_rect.corner_1.x += dx
        self.h_rect.corner_2.x += dx
        self.h_rect.corner_1.y += dy
        self.h_rect.corner_2.y += dy
        self.v_rect.corner_1.x += dx
        self.v_rect.corner_2.x += dx
        self.v_rect.corner_1.y += dy
        self.v_rect.corner_2.y += dy
# --------------------------------------------------------------
# DONE: 6.
# READ the above specification, including the Example.
# Implement and test this method by uncommenting the appropriate
# run_test method in main. Compare the graphics window to
# move_by.pdf. Note: the pdf shows the different locations
# that the T moves through, but there is only 1 T at any moment.
# --------------------------------------------------------------
def clone(self):
"""
What comes in:
-- self
What goes out:
-- Returns a new CapitalT that is located in the same position as
this CapitalT with the same colors for the rectangles.
Side effects:
-- None
Example:
window = rg.RoseWindow()
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.set_color('red', 'blue')
t2 = t1.clone() # t2 is at the same location WITH THE SAME COLORS
Type hints:
:rtype: CapitalT
"""
        h = self.h_rect
        v = self.v_rect
        # The intersection center is the center of the horizontal rectangle,
        # so average its opposite corners.
        intersect = rg.Point((h.get_upper_right_corner().x + h.get_lower_left_corner().x) / 2,
                             (h.get_upper_right_corner().y + h.get_lower_left_corner().y) / 2)
        thickness = math.fabs(h.get_upper_right_corner().y - h.get_lower_left_corner().y)
        clone = CapitalT(intersect, h.get_width(), v.get_height(), thickness)
        clone.set_colors(self.h_rect.fill_color, self.h_rect.outline_color)
        return clone
# --------------------------------------------------------------
# DONE: 7.
# READ the above specification, including the Example.
# Implement and test this method by uncommenting the appropriate
# run_test method in main. Compare the graphics window to
# clone.pdf.
# --------------------------------------------------------------
# ----------------------------------------------------------------------
# If this module is running at the top level (as opposed to being
# imported by another module), then call the 'main' function.
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
|
import time
from collections import deque
from datetime import datetime
import backtrader as bt
from backtrader.feed import DataBase
from backtrader.utils.py3 import with_metaclass
from .cryptostore import CryptoStore
class MetaCryptoFeed(DataBase.__class__):
def __init__(cls, name, bases, dct):
'''Class has already been created ... register'''
# Initialize the class
super(MetaCryptoFeed, cls).__init__(name, bases, dct)
# Register with the store
CryptoStore.DataCls = cls
class CryptoFeed(with_metaclass(MetaCryptoFeed, DataBase)):
"""
CryptoCurrency eXchange Trading Library Data Feed.
Params:
- ``historical`` (default: ``False``)
If set to ``True`` the data feed will stop after doing the first
download of data.
The standard data feed parameters ``fromdate`` and ``todate`` will be
used as reference.
- ``backfill_start`` (default: ``True``)
Perform backfilling at the start. The maximum possible historical data
will be fetched in a single request.
    Changes from Ed's package:
      - Added an option to send additional fetch_ohlcv_params, since some exchanges
        (e.g. Bitmex) support extra fetch parameters.
      - Added a drop_newest option to avoid loading incomplete candles on exchanges
        that do not support ohlcv params for preventing partial data
"""
params = (
('historical', False), # only historical download
('backfill_start', False), # do backfilling at the start
('fetch_ohlcv_params', {}),
('ohlcv_limit', 20),
('drop_newest', False),
('debug', False)
)
_store = CryptoStore
# States for the Finite State Machine in _load
_ST_LIVE, _ST_HISTORBACK, _ST_OVER = range(3)
# def __init__(self, exchange, symbol, ohlcv_limit=None, config={}, retries=5):
def __init__(self, **kwargs):
# self.store = CryptoStore(exchange, config, retries)
self.store = self._store(**kwargs)
self._data = deque() # data queue for price data
self._last_id = '' # last processed trade id for ohlcv
self._last_ts = 0 # last processed timestamp for ohlcv
self._ts_delta = None # timestamp delta for ohlcv
def start(self, ):
DataBase.start(self)
if self.p.fromdate:
self._state = self._ST_HISTORBACK
self.put_notification(self.DELAYED)
self._fetch_ohlcv(self.p.fromdate)
else:
self._state = self._ST_LIVE
self.put_notification(self.LIVE)
def _load(self):
if self._state == self._ST_OVER:
return False
while True:
if self._state == self._ST_LIVE:
if self._timeframe == bt.TimeFrame.Ticks:
return self._load_ticks()
else:
                    # INFO: Fix to address slow loading time after entering the LIVE state.
                    if len(self._data) == 0:
                        # INFO: Only call _fetch_ohlcv once self._data is fully consumed;
                        #       each call incurs network latency, and it is extremely
                        #       inefficient to fetch a batch of bars but load only one
                        #       bar at a time.
self._fetch_ohlcv()
ret = self._load_ohlcv()
if self.p.debug:
print('---- LOAD ----')
print('{} Load OHLCV Returning: {}'.format(datetime.utcnow(), ret))
return ret
elif self._state == self._ST_HISTORBACK:
ret = self._load_ohlcv()
if ret:
return ret
else:
# End of historical data
if self.p.historical: # only historical
self.put_notification(self.DISCONNECTED)
self._state = self._ST_OVER
return False # end of historical
else:
self._state = self._ST_LIVE
self.put_notification(self.LIVE)
def _fetch_ohlcv(self, fromdate=None):
"""Fetch OHLCV data into self._data queue"""
granularity = self.store.get_granularity(self._timeframe, self._compression)
if self.store.cache is not None and self.p.todate:
print("Loading from cache", self.p.dataname, granularity, fromdate, self.p.todate)
data = sorted(self.store.cache.query(self.p.dataname, granularity, fromdate, self.p.todate))
if self.p.drop_newest:
del data[-1]
if len(data) > 0:
self._data.extend(data)
self._last_ts = data[-1][0]
else:
till = int((self.p.todate - datetime(1970, 1, 1)).total_seconds() * 1000) if self.p.todate else None
if fromdate:
since = int((fromdate - datetime(1970, 1, 1)).total_seconds() * 1000)
else:
if self._last_ts > 0:
if self._ts_delta is None:
since = self._last_ts
else:
since = self._last_ts + self._ts_delta
else:
since = None
limit = self.p.ohlcv_limit
while True:
dlen = len(self._data)
if self.p.debug:
# TESTING
since_dt = datetime.utcfromtimestamp(since // 1000) if since is not None else 'NA'
print('---- NEW REQUEST ----')
print('{} - Requesting: Since TS:{} Since date:{} granularity:{}, limit:{}, params:{}'.format(
datetime.utcnow(), since, since_dt, granularity, limit, self.p.fetch_ohlcv_params))
data = sorted(self.store.fetch_ohlcv(self.p.dataname, timeframe=granularity,
since=since, limit=limit, params=self.p.fetch_ohlcv_params))
try:
for i, ohlcv in enumerate(data):
tstamp, open_, high, low, close, volume = ohlcv
print('{} - Data {}: {} - TS {} Time {}'.format(datetime.utcnow(), i,
datetime.utcfromtimestamp(tstamp // 1000),
tstamp, (time.time() * 1000)))
# ------------------------------------------------------------------
except IndexError:
print('Index Error: Data = {}'.format(data))
print('---- REQUEST END ----')
else:
data = sorted(self.store.fetch_ohlcv(self.p.dataname, timeframe=granularity,
since=since, limit=limit, params=self.p.fetch_ohlcv_params))
# Check to see if dropping the latest candle will help with
# exchanges which return partial data
if self.p.drop_newest:
del data[-1]
prev_tstamp = None
tstamp = None
for ohlcv in data:
if None in ohlcv:
continue
tstamp = ohlcv[0]
if prev_tstamp is not None and self._ts_delta is None:
# INFO: Record down the TS delta so that it can be used to increment since TS
self._ts_delta = tstamp - prev_tstamp
# Prevent from loading incomplete data
# if tstamp > (time.time() * 1000):
# continue
if tstamp > self._last_ts:
if self.p.debug:
print('Adding: {}'.format(ohlcv))
self._data.append(ohlcv)
self._last_ts = tstamp
if till and tstamp >= till:
break
if prev_tstamp is None:
prev_tstamp = tstamp
# print("?", tstamp, till, dlen, len(self._data))
if till and tstamp is not None:
if tstamp >= till:
break
since = tstamp
if dlen == len(self._data):
break
def _load_ticks(self):
if self._last_id is None:
# first time get the latest trade only
trades = [self.store.fetch_trades(self.p.dataname)[-1]]
else:
trades = self.store.fetch_trades(self.p.dataname)
if len(trades) <= 1:
if len(trades) == 1:
trade = trades[0]
trade_time = datetime.strptime(trade['datetime'], '%Y-%m-%dT%H:%M:%S.%fZ')
self._data.append((trade_time, float(trade['price']), float(trade['amount'])))
else:
trade_dict_list = []
index = 0
# Since we only need the last 2 trades, just loop through the last 2 trades to speed up the for loop
for trade in trades[-2:]:
trade_id = trade['id']
if trade_id > self._last_id:
trade_time = datetime.strptime(trade['datetime'], '%Y-%m-%dT%H:%M:%S.%fZ')
trade_dict = dict(index=index, trade_time=trade_time, price=float(trade['price']),
amount=float(trade['amount']))
trade_dict_list.append(trade_dict)
self._last_id = trade_id
index += 1
if len(trade_dict_list) > 0:
# The order of self._data should be in reversed order by trade datetime
reverse = True
selection_key = 'index'
                trade_dict_list.sort(key=lambda k: k[selection_key], reverse=reverse)  # sorts in place
for trade_dict in trade_dict_list:
self._data.append((trade_dict['trade_time'], trade_dict['price'], trade_dict['amount']))
                    # Break after appending the first entry; only the most recent trade is needed
break
try:
trade = self._data.popleft()
except IndexError:
return None # no data in the queue
trade_time, price, size = trade
self.lines.datetime[0] = bt.date2num(trade_time)
self.lines.open[0] = price
self.lines.high[0] = price
self.lines.low[0] = price
self.lines.close[0] = price
self.lines.volume[0] = size
return True
def _load_ohlcv(self):
try:
ohlcv = self._data.popleft()
except IndexError:
return None # no data in the queue
tstamp, open_, high, low, close, volume = ohlcv
dtime = datetime.utcfromtimestamp(tstamp // 1000)
self.lines.datetime[0] = bt.date2num(dtime)
self.lines.open[0] = open_
self.lines.high[0] = high
self.lines.low[0] = low
self.lines.close[0] = close
self.lines.volume[0] = volume
return True
def haslivedata(self):
return self._state == self._ST_LIVE and self._data
def islive(self):
return not self.p.historical
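# Example wiring (a sketch, not from the original source; the store keyword
# arguments are assumptions based on the commented-out __init__ signature above):
#
#     cerebro = bt.Cerebro()
#     data = CryptoFeed(exchange='binance', config={}, retries=5,
#                       dataname='BTC/USDT', timeframe=bt.TimeFrame.Minutes,
#                       compression=1, fromdate=datetime(2020, 1, 1),
#                       historical=True)
#     cerebro.adddata(data)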
|
"""This script creates some informative graphs on subgroups of income quartile, gender, and race."""
# %%
import os
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
# %%
# Set up folder path
code_folder = Path(os.path.abspath(''))
print(code_folder)
project_dir = os.path.dirname(code_folder)
os.chdir(project_dir)
print(project_dir)
# %%
from setup_fin_dataset import get_dataset
# %%
os.chdir(code_folder)
print(code_folder)
# %%
'''Plot scores by income quartile
'''
df = get_dataset()
#%%
df.dropna(axis=0, how='any', subset=['AFQT_1','ROSENBERG_SCORE', 'ROTTER_SCORE'], inplace=True)
# %%
ax = plt.figure().add_subplot(111)
for group in ['first quartile', 'second quartile', 'third quartile', 'fourth quartile']:
cond = df['FAMILY_INCOME_QUARTILE'] == group
dat = df.loc[df['SURVEY_YEAR'] == 1978, ['AFQT_1']].loc[cond].dropna()
sns.distplot(dat, label=group.capitalize())
csfont = {'fontname':'Times New Roman'}
ax.yaxis.get_major_ticks()[0].set_visible(False)
ax.set_xlabel('AFQT Scores', **csfont)
ax.set_xlim([0, 120])
ax.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.savefig('fig-inc-quartile-afqt.png')
for score in ['ROTTER', 'ROSENBERG']:
ax = plt.figure().add_subplot(111)
for group in ['first quartile', 'second quartile', 'third quartile', 'fourth quartile']:
label = score + '_SCORE'
cond = df['FAMILY_INCOME_QUARTILE'] == group
dat = df[cond].loc[df['SURVEY_YEAR'] == 1978, [label]].dropna()
sns.distplot(dat, label=group)
ax.set_xlabel(score.lower().capitalize() + ' Scores', **csfont)
if score == 'ROTTER':
plt.gca().invert_xaxis()
ax.yaxis.get_major_ticks()[0].set_visible(False)
ax.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.savefig('fig-inc-quartile-' + score.lower() + '.png')
# %%
'''Plot scores by gender
'''
df = get_dataset()
#%%
df.dropna(axis=0, how='any', subset=['AFQT_1','ROSENBERG_SCORE', 'ROTTER_SCORE'], inplace=True)
ax = plt.figure().add_subplot(111)
for group in [1, 2]:
cond = df['GENDER'] == group
dat = df.loc[df['SURVEY_YEAR'] == 1978, ['AFQT_1']].loc[cond].dropna()
sns.distplot(dat, label=group)
csfont = {'fontname':'Times New Roman'}
ax.yaxis.get_major_ticks()[0].set_visible(False)
ax.set_xlabel('AFQT Scores', **csfont)
ax.set_xlim([0, 120])
ax.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.savefig('fig-aptitude-gender.png')
for score in ['ROTTER', 'ROSENBERG']:
ax = plt.figure().add_subplot(111)
for group in [1, 2]:
label = score + '_SCORE'
cond = df['GENDER'] == group
dat = df[cond].loc[df['SURVEY_YEAR'] == 1978, [label]].dropna()
sns.distplot(dat, label=group)
ax.set_xlabel(score.lower().capitalize() + ' Scores', **csfont)
if score == 'ROTTER':
plt.gca().invert_xaxis()
ax.yaxis.get_major_ticks()[0].set_visible(False)
ax.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.savefig('fig-attitude-gender-' + score.lower() + '.png')
# %%
'''Plot scores by race
'''
df = get_dataset()
#%%
df.dropna(axis=0, how='any', subset=['AFQT_1','ROSENBERG_SCORE', 'ROTTER_SCORE'], inplace=True)
ax = plt.figure().add_subplot(111)
for group in [1, 2, 3]:
cond = df['RACE'] == group
dat = df.loc[df['SURVEY_YEAR'] == 1978, ['AFQT_1']].loc[cond].dropna()
sns.distplot(dat, label=group)
csfont = {'fontname':'Times New Roman'}
ax.yaxis.get_major_ticks()[0].set_visible(False)
ax.set_xlabel('AFQT Scores', **csfont)
ax.set_xlim([0, 120])
ax.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.savefig('fig-aptitude-race.png')
for score in ['ROTTER', 'ROSENBERG']:
ax = plt.figure().add_subplot(111)
for group in [1, 2, 3]:
label = score + '_SCORE'
cond = df['RACE'] == group
dat = df[cond].loc[df['SURVEY_YEAR'] == 1978, [label]].dropna()
sns.distplot(dat, label=group)
ax.set_xlabel(score.lower().capitalize() + ' Scores', **csfont)
if score == 'ROTTER':
plt.gca().invert_xaxis()
ax.yaxis.get_major_ticks()[0].set_visible(False)
ax.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.savefig('fig-attitude-race-' + score.lower() + '.png')
# %%
'''Plot by parental educational attainment, mother
'''
df = get_dataset()
#%%
df.dropna(axis=0, how='any', subset=['AFQT_1','ROSENBERG_SCORE', 'ROTTER_SCORE'], inplace=True)
# %%
df['MOTHER_EDU'].nunique()
# %%
df['FATHER_EDU'].nunique()
# %%
df_mother = df.groupby('MOTHER_EDU')['IDENTIFIER'].nunique().sort_values(ascending=False)
df_mother
# %%
df_father = df.groupby('FATHER_EDU')['IDENTIFIER'].nunique().sort_values(ascending=False)
df_father
# %%
ax = plt.figure().add_subplot(111)
for group in ['Less than HS', 'HS or more']:
cond = df['MOTHER_EDU'] == group
dat = df['AFQT_1'].loc[cond].dropna()
sns.distplot(dat, label=group)
csfont = {'fontname':'Times New Roman'}
ax.yaxis.get_major_ticks()[0].set_visible(False)
ax.set_xlabel('AFQT Scores', **csfont)
ax.set_xlim([0, 120])
ax.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.savefig('fig-aptitude-mother-edu.png')
for score in ['ROTTER', 'ROSENBERG']:
ax = plt.figure().add_subplot(111)
for group in ['Less than HS', 'HS or more']:
label = score + '_SCORE'
cond = df['MOTHER_EDU'] == group
dat = df[cond].loc[df['SURVEY_YEAR'] == 1978, [label]].dropna()
sns.distplot(dat, label=group)
ax.set_xlabel(score.lower().capitalize() + ' Scores', **csfont)
if score == 'ROTTER':
plt.gca().invert_xaxis()
ax.yaxis.get_major_ticks()[0].set_visible(False)
ax.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.savefig('fig-attitude-mother-edu-' + score.lower() + '.png')
# %%
'''Plot by parental educational attainment, father
'''
# %%
df = get_dataset()
#%%
df.dropna(axis=0, how='any', subset=['AFQT_1','ROSENBERG_SCORE', 'ROTTER_SCORE'], inplace=True)
ax = plt.figure().add_subplot(111)
for group in ['Less than HS', 'HS or more']:
cond = df['FATHER_EDU'] == group
dat = df['AFQT_1'].loc[cond].dropna()
sns.distplot(dat, label=group)
csfont = {'fontname':'Times New Roman'}
ax.yaxis.get_major_ticks()[0].set_visible(False)
ax.set_xlabel('AFQT Scores', **csfont)
ax.set_xlim([0, 120])
ax.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.savefig('fig-aptitude-father-edu.png')
for score in ['ROTTER', 'ROSENBERG']:
ax = plt.figure().add_subplot(111)
for group in ['Less than HS', 'HS or more']:
label = score + '_SCORE'
cond = df['FATHER_EDU'] == group
dat = df[cond].loc[df['SURVEY_YEAR'] == 1978, [label]].dropna()
sns.distplot(dat, label=group)
ax.set_xlabel(score.lower().capitalize() + ' Scores', **csfont)
if score == 'ROTTER':
plt.gca().invert_xaxis()
ax.yaxis.get_major_ticks()[0].set_visible(False)
ax.legend()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.savefig('fig-attitude-father-edu-' + score.lower() + '.png')
# %%
|
import pytest
import logging
from traitlets.config.loader import PyFileConfigLoader
from traitlets import TraitError
from jupyter_telemetry.eventlog import EventLog
GOOD_CONFIG = """
import logging
c.EventLog.handlers = [
logging.StreamHandler()
]
"""
BAD_CONFIG = """
import logging
c.EventLog.handlers = [
0
]
"""
def get_config_from_file(path, content):
# Write config file
filename = 'config.py'
config_file = path / filename
config_file.write_text(content)
# Load written file.
loader = PyFileConfigLoader(filename, path=str(path))
cfg = loader.load_config()
return cfg
def test_good_config_file(tmp_path):
cfg = get_config_from_file(tmp_path, GOOD_CONFIG)
# Pass config to EventLog
e = EventLog(config=cfg)
    # Assert the handlers were loaded from the config file.
assert len(e.handlers) > 0
assert isinstance(e.handlers[0], logging.Handler)
def test_bad_config_file(tmp_path):
cfg = get_config_from_file(tmp_path, BAD_CONFIG)
with pytest.raises(TraitError):
e = EventLog(config=cfg)
|