# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FlowLogsOperations:
"""FlowLogsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
network_watcher_name: str,
flow_log_name: str,
parameters: "_models.FlowLog",
**kwargs
) -> "_models.FlowLog":
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLog"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'FlowLog')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('FlowLog', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('FlowLog', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
network_watcher_name: str,
flow_log_name: str,
parameters: "_models.FlowLog",
**kwargs
) -> AsyncLROPoller["_models.FlowLog"]:
"""Create or update a flow log for the specified network security group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param flow_log_name: The name of the flow log.
:type flow_log_name: str
:param parameters: Parameters that define the create or update flow log resource.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.FlowLog
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either FlowLog or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.FlowLog]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLog"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
flow_log_name=flow_log_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('FlowLog', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'} # type: ignore
async def get(
self,
resource_group_name: str,
network_watcher_name: str,
flow_log_name: str,
**kwargs
) -> "_models.FlowLog":
"""Gets a flow log resource by name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param flow_log_name: The name of the flow log resource.
:type flow_log_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowLog, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.FlowLog
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLog"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('FlowLog', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
network_watcher_name: str,
flow_log_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_watcher_name: str,
flow_log_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified flow log resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param flow_log_name: The name of the flow log resource.
:type flow_log_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
flow_log_name=flow_log_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'flowLogName': self._serialize.url("flow_log_name", flow_log_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs/{flowLogName}'} # type: ignore
def list(
self,
resource_group_name: str,
network_watcher_name: str,
**kwargs
) -> AsyncIterable["_models.FlowLogListResult"]:
"""Lists all flow log resources for the specified Network Watcher.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FlowLogListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.FlowLogListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowLogListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('FlowLogListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/flowLogs'} # type: ignore
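# Hedged usage sketch (not part of the generated file): one way to exercise this operation
# group through the versioned async management client. Assumes azure-identity is installed
# and that the client exposes the group as `flow_logs`; the subscription, resource names,
# and FlowLog payload below are placeholders.
if __name__ == "__main__":
    import asyncio
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient

    async def _create_flow_log_example():
        async with DefaultAzureCredential() as credential:
            async with NetworkManagementClient(credential, "<subscription-id>", api_version="2020-05-01") as client:
                poller = await client.flow_logs.begin_create_or_update(
                    "<resource-group>",
                    "<network-watcher>",
                    "<flow-log>",
                    client.flow_logs.models.FlowLog(
                        location="eastus",
                        target_resource_id="<nsg-resource-id>",
                        storage_id="<storage-account-resource-id>",
                        enabled=True,
                    ),
                )
                flow_log = await poller.result()  # wait for the long-running operation
                print(flow_log.provisioning_state)

    asyncio.run(_create_flow_log_example())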
|
# encoding: utf-8
# STANDARD LIB
from unittest import skipIf
# THIRD PARTY
from django.apps.registry import apps # Apps
from django.conf import settings
from django.db import connection, models
from django.db.migrations.state import ProjectState
from django.test import override_settings
from google.appengine.api import datastore
from google.appengine.runtime import DeadlineExceededError
# DJANGAE
from djangae.contrib import sleuth
from djangae.db.migrations import operations
from djangae.db.migrations.mapper_library import (
_get_range,
_mid_key,
_mid_string,
_next_string,
shard_query,
ShardedTaskMarker,
start_mapping,
)
from djangae.test import TestCase
# Workaround for https://code.djangoproject.com/ticket/28188
def return_a_string():
return "squirrel"
class TestModel(models.Model):
name = models.CharField(max_length=100)
class Meta:
app_label = "djangae"
class OtherModel(models.Model):
name = models.CharField(max_length=100)
class Meta:
app_label = "djangae"
class OtherAppModel(models.Model):
name = models.CharField(max_length=100)
class Meta:
app_label = "testapp"
class UniqueException(Exception):
""" An exception which we can explicity throw and catch. """
pass
def tickle_entity(entity):
entity['is_tickled'] = True
datastore.Put(entity)
def tickle_entity_volitle(entity):
""" Like `tickle_entity`, but raises DeadlineExceededError every 3rd call. """
call_count = getattr(tickle_entity_volitle, "call_count", 1)
tickle_entity_volitle.call_count = call_count + 1
if call_count % 3 == 0:
raise DeadlineExceededError()
else:
tickle_entity(entity)
def flush_task_markers():
""" Delete all ShardedTaskMarker objects from the DB.
Useful to call in setUp(), as Django doesn't wipe this kind because there's
no model for it.
"""
namespaces = set()
namespaces.add(settings.DATABASES['default'].get('NAMESPACE', ''))
namespaces.add(settings.DATABASES.get('ns1', {}).get('NAMESPACE', ''))
for namespace in namespaces:
query = datastore.Query(
ShardedTaskMarker.KIND,
namespace=namespace,
keys_only=True
).Run()
datastore.Delete([x for x in query])
class MigrationOperationTests(TestCase):
multi_db = True
def setUp(self):
# We need to clean out the migration task markers from the Datastore between each test, as
# the standard flush only cleans out models
super(MigrationOperationTests, self).setUp()
flush_task_markers()
def start_operation(self, operation, detonate=True):
# Make a from_state and a to_state to pass to the operation, these can just be the
# current state of the models
from_state = ProjectState.from_apps(apps)
to_state = from_state.clone()
schema_editor = connection.schema_editor()
app_label = TestModel._meta.app_label
# If we just start the operation then it will hang forever waiting for its mapper task to
# complete, so we won't even be able to call process_task_queues(). So to avoid that we
# detonate the _wait_until_task_finished method. Then tasks can be processed after that.
if detonate:
with sleuth.detonate(
"djangae.tests.test_migrations.operations.%s._wait_until_task_finished" % operation.__class__.__name__,
UniqueException
):
try:
operation.database_forwards(app_label, schema_editor, from_state, to_state)
except UniqueException:
pass
else:
operation.database_forwards(app_label, schema_editor, from_state, to_state)
def get_entities(self, model=TestModel, namespace=None):
namespace = namespace or settings.DATABASES['default'].get('NAMESPACE', '')
query = datastore.Query(
model._meta.db_table,
namespace=namespace,
)
return [x for x in query.Run()]
def test_run_operation_creates_and_updates_task_marker(self):
""" If we run one of our custom operations, then it should create the task marker in the DB
and defer a task, then set the marker to 'is_finished' when done.
"""
TestModel.objects.create()
operation = operations.AddFieldData(
"testmodel", "new_field", models.CharField(max_length=100, default="squirrel")
)
self.start_operation(operation)
# Now check that the task marker has been created.
# Usefully, calling database_forwards() on the operation will have caused it to set the
# `identifier` attribute on itself, meaning we can now look up its task marker directly
task_marker = datastore.Get(
[ShardedTaskMarker.get_key(operation.identifier, operation.namespace)]
)[0]
if task_marker is None:
self.fail("Migration operation did not create its task marker")
self.assertFalse(task_marker.get("is_finished"))
self.assertNumTasksEquals(1)
self.process_task_queues()
# Now check that the task marker has been marked as finished
task_marker = datastore.Get(
[ShardedTaskMarker.get_key(operation.identifier, operation.namespace)]
)[0]
self.assertTrue(task_marker["is_finished"])
self.assertNumTasksEquals(0)
def test_starting_operation_twice_does_not_trigger_task_twice(self):
""" If we run an operation, and then try to run it again before the task has finished
processing, then it should not trigger a second task.
"""
TestModel.objects.create()
operation = operations.AddFieldData(
"testmodel", "new_field", models.CharField(max_length=100, default="squirrel")
)
self.start_operation(operation)
task_marker = datastore.Get(
ShardedTaskMarker.get_key(operation.identifier, operation.namespace)
)
self.assertFalse(task_marker["is_finished"])
# We expect there to be a task queued for processing the operation
self.assertNumTasksEquals(1)
# Now try to run it again
self.start_operation(operation)
# We expect there to still be the same number of tasks
self.assertNumTasksEquals(1)
def test_running_finished_operation_does_not_trigger_new_task(self):
""" If we re-trigger an operation which has already been run and finished, it should simply
return without starting a new task or updating the task marker.
"""
TestModel.objects.create()
operation = operations.AddFieldData(
"testmodel", "new_field", models.CharField(max_length=100, default="squirrel")
)
# Run the operation and check that it finishes
with sleuth.watch("djangae.db.migrations.operations.AddFieldData._start_task") as start:
self.start_operation(operation)
self.assertTrue(start.called)
task_marker = datastore.Get(
ShardedTaskMarker.get_key(operation.identifier, operation.namespace)
)
self.assertFalse(task_marker["is_finished"])
self.assertNumTasksEquals(1)
self.process_task_queues()
task_marker = datastore.Get(
ShardedTaskMarker.get_key(operation.identifier, operation.namespace)
)
self.assertTrue(task_marker["is_finished"])
# Run the operation again. It should see that it's finished and just return immediately.
self.assertNumTasksEquals(0)
with sleuth.watch("djangae.db.migrations.operations.AddFieldData._start_task") as start:
self.start_operation(operation, detonate=False)
self.assertFalse(start.called)
self.assertNumTasksEquals(0)
task_marker = datastore.Get(
ShardedTaskMarker.get_key(operation.identifier, operation.namespace)
)
self.assertTrue(task_marker["is_finished"])
def test_queue_option(self):
""" The `queue` kwarg should determine the task queue that the operation runs on. """
for x in xrange(3):
TestModel.objects.create()
operation = operations.AddFieldData(
"testmodel", "new_field", models.CharField(max_length=100, default=return_a_string),
queue="another",
# Ensure that we trigger a re-defer, so that we test that the correct queue is used for
# subsequent tasks, not just the first one
entities_per_task=1,
shard_count=1
)
self.start_operation(operation)
# The task(s) should not be in the default queue, but in the "another" queue instead
self.assertEqual(self.get_task_count("default"), 0)
self.assertTrue(self.get_task_count("another") > 0)
# And if we only run the tasks on the "another" queue, the whole operation should complete.
self.process_task_queues("another")
# And the entities should be updated
entities = self.get_entities()
self.assertTrue(all(entity['new_field'] == 'squirrel' for entity in entities))
def test_default_queue_setting(self):
""" If no `queue` kwarg is passed then the DJANGAE_MIGRATION_DEFAULT_QUEUE setting should
be used to determine the task queue.
"""
for x in xrange(2):
TestModel.objects.create()
operation = operations.AddFieldData(
"testmodel", "new_field", models.CharField(max_length=100, default="squirrel"),
)
# Check that starting the operation with a different setting correctly affects the queue.
# Note that here we don't check that *all* tasks go on the correct queue, just the first
# one. We test that more thoroughly in `test_queue_option` above.
with override_settings(DJANGAE_MIGRATION_DEFAULT_QUEUE="another"):
self.start_operation(operation)
self.assertEqual(self.get_task_count("default"), 0)
self.assertTrue(self.get_task_count("another") > 0)
self.flush_task_queues()
flush_task_markers()
# sanity checks:
assert getattr(settings, "DJANGAE_MIGRATION_DEFAULT_QUEUE", None) is None
assert self.get_task_count() == 0
# Trigger the operation without that setting. The task(s) should go on the default queue.
self.start_operation(operation)
self.assertTrue(self.get_task_count("default") > 0)
def test_uid_allows_separate_identical_operations_to_be_run(self):
""" By passing the 'uid' kwarg to an operation, we should allow it to be run, even if an
otherwise identical operation has already been run.
"""
operation1 = operations.AddFieldData(
"testmodel", "new_field", models.BooleanField(default=True)
)
operation2 = operations.AddFieldData(
"testmodel", "new_field", models.BooleanField(default=True)
)
operation3 = operations.AddFieldData(
"testmodel", "new_field", models.BooleanField(default=True), uid="x"
)
# Create a model instance and run the first operation on it
instance = TestModel.objects.create()
self.start_operation(operation1)
self.process_task_queues()
# Check that the migration ran successfully
entity = self.get_entities()[0]
self.assertTrue(entity["new_field"])
# Now create another entity and make sure that the second migration (which is identical)
# does NOT run on it
instance.delete()
instance = TestModel.objects.create()
self.start_operation(operation2)
self.process_task_queues()
entity = self.get_entities()[0]
self.assertIsNone(entity.get("new_field"))
# Now run the third operation, which is identical but has a uid, so SHOULD be run
self.start_operation(operation3)
self.process_task_queues()
entity = self.get_entities()[0]
self.assertTrue(entity["new_field"])
def test_addfielddata(self):
""" Test the AddFieldData operation. """
for x in xrange(2):
TestModel.objects.create()
# Just for sanity, check that none of the entities have the new field value yet
entities = self.get_entities()
self.assertFalse(any(entity.get("new_field") for entity in entities))
operation = operations.AddFieldData(
"testmodel", "new_field", models.CharField(max_length=100, default="squirrel")
)
self.start_operation(operation)
self.process_task_queues()
# The entities should now all have the 'new_field' actually mapped over
entities = self.get_entities()
self.assertTrue(all(entity['new_field'] == 'squirrel' for entity in entities))
def test_removefielddata(self):
""" Test the RemoveFieldData operation. """
for x in xrange(2):
TestModel.objects.create(name="name_%s" % x)
# Just for sanity, check that all of the entities have `name` value
entities = self.get_entities()
self.assertTrue(all(entity["name"] for entity in entities))
operation = operations.RemoveFieldData(
"testmodel", "name", models.CharField(max_length=100)
)
self.start_operation(operation)
self.process_task_queues()
# The entities should now all have the 'name' value removed
entities = self.get_entities()
self.assertFalse(any(entity.get("name") for entity in entities))
def test_copyfielddata(self):
""" Test the CopyFieldData operation. """
for x in xrange(2):
TestModel.objects.create(name="name_%s" % x)
# Just for sanity, check that none of the entities have the new "new_field" value
entities = self.get_entities()
self.assertFalse(any(entity.get("new_field") for entity in entities))
operation = operations.CopyFieldData(
"testmodel", "name", "new_field"
)
self.start_operation(operation)
self.process_task_queues()
# The entities should now all have the "new_field" value
entities = self.get_entities()
self.assertTrue(all(entity["new_field"] == entity["name"] for entity in entities))
def test_deletemodeldata(self):
""" Test the DeleteModelData operation. """
for x in xrange(2):
TestModel.objects.create()
# Just for sanity, check that the entities exist!
entities = self.get_entities()
self.assertEqual(len(entities), 2)
operation = operations.DeleteModelData("testmodel")
self.start_operation(operation)
self.process_task_queues()
# The entities should now all be gone
entities = self.get_entities()
self.assertEqual(len(entities), 0)
def test_copymodeldata_overwrite(self):
""" Test the CopyModelData operation with overwrite_existing=True. """
# Create the TestModel instances, with OtherModel instances with matching PKs
for x in xrange(2):
instance = TestModel.objects.create(name="name_which_will_be_copied")
OtherModel.objects.create(name="original_name", id=instance.pk)
# Just for sanity, check that the entities exist
testmodel_entities = self.get_entities()
othermodel_entities = self.get_entities(model=OtherModel)
self.assertEqual(len(testmodel_entities), 2)
self.assertEqual(len(othermodel_entities), 2)
operation = operations.CopyModelData(
"testmodel", "djangae", "othermodel", overwrite_existing=True
)
self.start_operation(operation)
self.process_task_queues()
# The OtherModel entities should now all have a name of "name_which_will_be_copied"
othermodel_entities = self.get_entities(model=OtherModel)
self.assertTrue(all(
entity["name"] == "name_which_will_be_copied" for entity in othermodel_entities
))
def test_copymodeldata_no_overwrite(self):
""" Test the CopyModelData operation with overwrite_existing=False. """
# Create the TestModel instances, with OtherModel instances with matching PKs only for
# odd PKs
for x in xrange(1, 5):
TestModel.objects.create(id=x, name="name_which_will_be_copied")
if x % 2:
OtherModel.objects.create(id=x, name="original_name")
# Just for sanity, check that the entities exist
testmodel_entities = self.get_entities()
othermodel_entities = self.get_entities(model=OtherModel)
self.assertEqual(len(testmodel_entities), 4)
self.assertEqual(len(othermodel_entities), 2)
operation = operations.CopyModelData(
"testmodel", "djangae", "othermodel", overwrite_existing=False
)
self.start_operation(operation)
self.process_task_queues()
# We now expect there to be 4 OtherModel entities, but only the ones which didn't exist
# already (i.e. the ones with even PKs) should have the name copied from the TestModel
othermodel_entities = self.get_entities(model=OtherModel)
self.assertEqual(len(othermodel_entities), 4)
for entity in othermodel_entities:
if entity.key().id() % 2:
self.assertEqual(entity["name"], "original_name")
else:
self.assertEqual(entity["name"], "name_which_will_be_copied")
@skipIf("ns1" not in settings.DATABASES, "This test is designed for the Djangae testapp settings")
def test_copymodeldatatonamespace_overwrite(self):
""" Test the CopyModelDataToNamespace operation with overwrite_existing=True. """
ns1 = settings.DATABASES["ns1"]["NAMESPACE"]
# Create instances, with copies in the other namespace with matching IDs
for x in xrange(2):
instance = TestModel.objects.create(name="name_which_will_be_copied")
instance.save(using="ns1")
# Just for sanity, check that the entities exist
entities = self.get_entities()
ns1_entities = self.get_entities(namespace=ns1)
self.assertEqual(len(entities), 2)
self.assertEqual(len(ns1_entities), 2)
operation = operations.CopyModelDataToNamespace(
"testmodel", ns1, overwrite_existing=True
)
self.start_operation(operation)
self.process_task_queues()
# The entities in ns1 should now all have a name of "name_which_will_be_copied"
ns1_entities = self.get_entities(namespace=ns1)
self.assertTrue(all(
entity["name"] == "name_which_will_be_copied" for entity in ns1_entities
))
@skipIf("ns1" not in settings.DATABASES, "This test is designed for the Djangae testapp settings")
def test_copymodeldatatonamespace_no_overwrite(self):
""" Test the CopyModelDataToNamespace operation with overwrite_existing=False. """
ns1 = settings.DATABASES["ns1"]["NAMESPACE"]
# Create the TestModel instances, with OtherModel instances with matching PKs only for
# odd PKs
for x in xrange(1, 5):
TestModel.objects.create(id=x, name="name_which_will_be_copied")
if x % 2:
ns1_instance = TestModel(id=x, name="original_name")
ns1_instance.save(using="ns1")
# Just for sanity, check that the entities exist
entities = self.get_entities()
ns1_entities = self.get_entities(namespace=ns1)
self.assertEqual(len(entities), 4)
self.assertEqual(len(ns1_entities), 2)
operation = operations.CopyModelDataToNamespace(
"testmodel", ns1, overwrite_existing=False
)
self.start_operation(operation)
self.process_task_queues()
# We now expect there to be 4 entities in the new namespace, but only the ones which didn't
# exist already (i.e. the ones with even PKs) should have their `name` updated
ns1_entities = self.get_entities(namespace=ns1)
self.assertEqual(len(ns1_entities), 4)
for entity in ns1_entities:
if entity.key().id() % 2:
self.assertEqual(entity["name"], "original_name")
else:
self.assertEqual(entity["name"], "name_which_will_be_copied")
@skipIf(
"ns1" not in settings.DATABASES or "testapp" not in settings.INSTALLED_APPS,
"This test is designed for the Djangae testapp settings"
)
def test_copymodeldatatonamespace_new_app_label(self):
""" Test the CopyModelDataToNamespace operation with new data being saved to a new model in
a new app as well as in a new namespace.
"""
ns1 = settings.DATABASES["ns1"]["NAMESPACE"]
for x in xrange(2):
TestModel.objects.create(name="name_which_will_be_copied")
# Just for sanity, check that the entities exist
entities = self.get_entities()
new_entities = self.get_entities(model=OtherAppModel, namespace=ns1)
self.assertEqual(len(entities), 2)
self.assertEqual(len(new_entities), 0)
operation = operations.CopyModelDataToNamespace(
"testmodel", ns1, to_app_label="testapp", to_model_name="otherappmodel"
)
self.start_operation(operation)
self.process_task_queues()
# The entities in ns1 should now all have a name of "name_which_will_be_copied"
new_entities = self.get_entities(model=OtherAppModel, namespace=ns1)
self.assertEqual(len(new_entities), 2)
self.assertTrue(all(
entity["name"] == "name_which_will_be_copied" for entity in new_entities
))
def test_mapfunctiononentities(self):
""" Test the MapFunctionOnEntities operation. """
for x in xrange(2):
TestModel.objects.create()
# Test that our entities have not had our function called on them
entities = self.get_entities()
self.assertFalse(any(entity.get("is_tickled") for entity in entities))
operation = operations.MapFunctionOnEntities("testmodel", tickle_entity)
self.start_operation(operation)
self.process_task_queues()
entities = self.get_entities()
self.assertEqual(len(entities), 2)
self.assertTrue(all(entity.get("is_tickled") for entity in entities))
class MidStringTestCase(TestCase):
""" Tests for the _mid_string function in the mapper_library. """
def test_handles_args_in_either_order(self):
""" It shouldn't matter whether we pass the "higher" string as the first or second param. """
low = "aaaaa"
high = "zzzzz"
mid1 = _mid_string(low, high)
mid2 = _mid_string(high, low)
self.assertEqual(mid1, mid2)
self.assertTrue(low < mid1 < high)
def test_basic_behaviour(self):
""" Test finding the midpoint between two string in an obvious case. """
start = "a"
end = "c"
self.assertEqual(_mid_string(start, end), "b")
def test_slightly_less_basic_behaviour(self):
start = "aaaaaaaaaaaa"
end = "z"
mid_low_apprx = "l"
mid_high_apprx = "n"
result = _mid_string(start, end)
self.assertTrue(mid_low_apprx < result < mid_high_apprx)
def test_handles_strings_of_different_lengths(self):
""" Strings of different lengths should return another of a length mid way between """
start = "aaa"
end = "zzzzzzzzzzzzz"
mid = _mid_string(start, end)
self.assertTrue(start < mid < end)
def test_handles_unicode(self):
""" It should be able to do comparisions on non-ascii strings. """
start = u"aaa£¢$›😇"
end = u"zzz🤡"
mid = _mid_string(start, end)
self.assertTrue(start < mid < end)
def test_does_not_return_string_starting_with_double_underscore(self):
""" A string that starts with a double underscore is not a valid Datastore key and so
should not be returned.
"""
# The true mid point between this start and end combination is a double underscore
start = "^^"
end = "``"
result = _mid_string(start, end)
self.assertNotEqual(result, "__")
class MidKeyTestCase(TestCase):
""" Tests for the `_mid_key` function. """
def test_mixed_integers_and_strings_not_allowed(self):
""" Finding the mid point between keys of different types is not currently supported and
should therefore raise an error.
"""
key1 = datastore.Key.from_path("my_kind", 1)
key2 = datastore.Key.from_path("my_kind", "1")
self.assertRaises(NotImplementedError, _mid_key, key1, key2)
def test_mid_integer_key(self):
""" Given 2 keys with integer `id_or_name` values, the returned key should have an
`id_or_name` which is an integer somewhere between the two.
"""
key1 = datastore.Key.from_path("my_kind", 1)
key2 = datastore.Key.from_path("my_kind", 100)
result = _mid_key(key1, key2)
self.assertEqual(result.kind(), key1.kind())
self.assertEqual(result.namespace(), key1.namespace())
self.assertTrue(1 < result.id_or_name() < 100)
def test_mid_string_key(self):
""" Given 2 keys with string `id_or_name` values, the returned key should have an
`id_or_name` which is a string somewhere between the two.
"""
key1 = datastore.Key.from_path("my_kind", "1")
key2 = datastore.Key.from_path("my_kind", "100")
result = _mid_key(key1, key2)
self.assertEqual(result.kind(), key1.kind())
self.assertEqual(result.namespace(), key1.namespace())
self.assertTrue("1" < result.id_or_name() < "100")
class NextStringTestCase(TestCase):
""" Tests for the _next_string function in the mapper_library. """
def test_basic_behaviour(self):
try:
unichr(65536)
# Python wide-unicode build (Linux) UTF-32
highest_unicode_char = unichr(0x10ffff)
except ValueError:
# Python narrow build (OSX)
# Python 2 using 16 bit unicode, so the highest possible character is (2**16) - 1
highest_unicode_char = unichr(2 ** 16 - 1)
checks = (
# Pairs of (input, expected_output)
("a", "b"),
("aaaa", "aaab"),
# The highest possible character cannot be incremented, so unichr(1) is appended instead
(highest_unicode_char, highest_unicode_char + unichr(1)),
(u"aaa" + highest_unicode_char, u"aaa" + highest_unicode_char + unichr(1)),
)
for input_text, expected_output in checks:
self.assertEqual(_next_string(input_text), expected_output)
class GetKeyRangeTestCase(TestCase):
""" Tests for the `_get_range` function. """
def test_integer_range(self):
""" Given 2 integer-based keys, it should return the range that the IDs span. """
key1 = datastore.Key.from_path("my_kind", 4012809128)
key2 = datastore.Key.from_path("my_kind", 9524773032)
self.assertEqual(_get_range(key1, key2), 9524773032 - 4012809128)
def test_string_range(self):
""" Given 2 string-based keys, it should return a representation of the range that the two
keys span.
"""
key1 = datastore.Key.from_path("my_kind", "a")
key2 = datastore.Key.from_path("my_kind", "b")
# The difference between "a" and "b" is 1 character
self.assertEqual(_get_range(key1, key2), unichr(1))
def test_mixed_keys_cause_exception(self):
""" Trying to get a range between 2 keys when one is an integer and the other is a string
should cause an explosion.
"""
key1 = datastore.Key.from_path("my_kind", "a")
key2 = datastore.Key.from_path("my_kind", 12345)
self.assertRaises(Exception, _get_range, key1, key2)
class ShardQueryTestCase(TestCase):
""" Tests for the `shard_query` function. """
def test_query_sharding(self):
ns1 = settings.DATABASES["default"]["NAMESPACE"]
for x in xrange(1, 21):
TestModel.objects.create(pk=x)
qry = datastore.Query(TestModel._meta.db_table, namespace=ns1)
shards = shard_query(qry, 1)
self.assertEqual(1, len(shards))
shards = shard_query(qry, 20)
self.assertEqual(20, len(shards))
shards = shard_query(qry, 50)
# We can't create 50 shards if there are only 20 objects
self.assertEqual(20, len(shards))
class MapperLibraryTestCase(TestCase):
""" Tests which check the behaviour of the mapper library directly. """
def setUp(self):
# We need to clean out the migration task markers from the Datastore between each test, as
# the standard flush only cleans out models
super(MapperLibraryTestCase, self).setUp()
flush_task_markers()
def _get_testmodel_query(self, db="default"):
namespace = settings.DATABASES[db].get('NAMESPACE', '')
return datastore.Query(
TestModel._meta.db_table,
namespace=namespace
)
def _get_taskmarker_query(self, namespace=""):
return datastore.Query("ShardedTaskMarker", namespace=namespace)
def test_basic_processing(self):
""" Test that calling `start_mapping` with some sensible parameters will do the right
processing.
"""
objs = []
for x in xrange(2):
objs.append(TestModel(name="Test-%s" % x))
TestModel.objects.bulk_create(objs)
start_mapping("my_lovely_mapper", self._get_testmodel_query(), tickle_entity)
self.process_task_queues()
# And check that every entity has been tickled
self.assertTrue(all(e['is_tickled'] for e in self._get_testmodel_query().Run()))
def test_cannot_start_same_mapping_twice(self):
""" Calling `start_mapping` with the same parameters twice then it should NOT create 2
mappers.
"""
objs = []
for x in xrange(2):
objs.append(TestModel(name="Test-%s" % x))
TestModel.objects.bulk_create(objs)
assert self._get_taskmarker_query().Count() == 0 # Sanity
marker = start_mapping("my_test_mapper", self._get_testmodel_query(), tickle_entity)
task_count = self.get_task_count()
assert marker # Sanity
assert task_count # Sanity
# Now try to defer the same mapper again
marker = start_mapping("my_test_mapper", self._get_testmodel_query(), tickle_entity)
# That shouldn't have worked, so the number of tasks should remain unchanged
self.assertEqual(self.get_task_count(), task_count)
# And it should not have returned a marker
self.assertIsNone(marker)
def test_can_start_same_mapping_in_2_different_namespaces(self):
""" Calling `start_mapping` with the same parameters but with different namespaces on the
query should work and correctly defer 2 processing tasks.
"""
dbs = ("default", "ns1")
# Create some objects in 2 different namespaces
for db in dbs:
objs = []
for x in xrange(2):
objs.append(TestModel(name="Test-%s" % x))
TestModel.objects.using(db).bulk_create(objs)
# Start the same mapper twice but in 2 different namespaces, and check that they both work
current_task_count = self.get_task_count()
markers = set()
for db in dbs:
marker = start_mapping("my_test_mapper", self._get_testmodel_query(db), tickle_entity)
self.assertIsNotNone(marker)
self.assertFalse(marker in markers)
markers.add(marker)
new_task_count = self.get_task_count()
self.assertTrue(new_task_count > current_task_count)
current_task_count = new_task_count
def test_mapper_will_continue_after_deadline_exceeded_error(self):
""" If DeadlineExceededError is encountered when processing one of the entities, the mapper
should redefer and continue.
"""
objs = []
for x in xrange(8):
objs.append(TestModel(name="Test-%s" % x))
TestModel.objects.bulk_create(objs)
identifier = "my_test_mapper"
query = self._get_testmodel_query()
# Reset the call_count on tickle_entity_volitle. We can't use sleuth.watch because a
# wrapped function can't be pickled
tickle_entity_volitle.call_count = 0
# Run the mapper and run all the tasks
start_mapping(
identifier, query, tickle_entity_volitle, shard_count=1,
)
self.process_task_queues()
# Check that the tickle_entity_volitle function was called more times than there are
# entities (because some calls should have failed and been retried)
# self.assertTrue(tickle_entity_volitle.call_count > TestModel.objects.count())
# And check that every entity has been tickled
self.assertTrue(all(e['is_tickled'] for e in self._get_testmodel_query().Run()))
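# Hedged sketch (not part of the original test module): roughly how one of the data
# operations exercised above might be declared inside a real Django migration; the
# dependency below is a placeholder.
from django.db import migrations
from djangae.db.migrations import operations as djangae_operations


class ExampleAddFieldDataMigration(migrations.Migration):

    dependencies = [
        ("djangae", "0001_initial"),  # placeholder dependency
    ]

    operations = [
        # Backfill `new_field` on existing Datastore entities via the mapper library.
        djangae_operations.AddFieldData(
            "testmodel", "new_field",
            models.CharField(max_length=100, default="squirrel"),
        ),
    ]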
|
import csv
import json
# Convert each CSV row to a dict and dump the whole file as a JSON array.
with open('20180308.csv', 'r') as csvfile, open('20180308.json', 'w') as jsonfile:
    reader = csv.DictReader(csvfile)
    json.dump([row for row in reader], jsonfile)
|
import json
import os
from typing import Mapping
_TESTS_DIR_PATH = os.path.dirname(__file__)
def get_test_file_path(path: str) -> str:
filepath = os.path.join(
_TESTS_DIR_PATH,
path,
)
return filepath
def read_test_file_bytes(path: str) -> bytes:
filepath = os.path.join(
_TESTS_DIR_PATH,
path,
)
with open(filepath, mode='rb') as file:
content = file.read()
return content
def read_test_file_str_ascii(path: str) -> str:
filepath = os.path.join(
_TESTS_DIR_PATH,
path,
)
with open(filepath, mode='rt', encoding='ascii') as file:
content = file.read()
return content
def read_test_file_str_utf8(path: str) -> str:
filepath = os.path.join(
_TESTS_DIR_PATH,
path,
)
with open(filepath, mode='rt', encoding='utf8') as file:
content = file.read()
return content
def read_test_file_json_dict(path: str) -> Mapping[str, object]:
filepath = os.path.join(
_TESTS_DIR_PATH,
path,
)
with open(filepath, mode='rb') as file:
content = json.load(file)
if isinstance(content, Mapping):
return content
else:
raise TypeError(
f"Expected JSON file content to be a 'Mapping', not a '{content.__class__.__name__}'.",
)
|
import os
import sys
import json
import jsonschema
from subprocess import Popen, PIPE
from threading import Thread
from traceback import format_exc
from cc_container_worker.application_container.telemetry import Telemetry
from cc_container_worker.commons.data import ac_download, ac_upload, tracing_upload
from cc_container_worker.commons.callbacks import CallbackHandler
from cc_container_worker.commons.schemas import application_config_schema
CONFIG_FILE_PATH = os.path.join(os.path.expanduser('~'), '.config', 'cc-container-worker', 'config.json')
LOCAL_TRACING_FILE = {
'dir': '/var/tmp/cc-tracing',
'name': 'data.csv',
'optional': True
}
def main():
settings = json.loads(sys.argv[1])
callback_handler = CallbackHandler(settings, container_type='application')
config = None
try:
with open(CONFIG_FILE_PATH) as f:
config = json.load(f)
jsonschema.validate(config, application_config_schema)
except:
description = 'Could not load JSON config file from path {}'.format(CONFIG_FILE_PATH)
callback_handler.send_callback(
callback_type='started', state='failed', description=description, exception=format_exc()
)
exit(3)
for key, val in config['local_result_files'].items():
try:
if not os.path.exists(val['dir']):
os.makedirs(val['dir'])
except:
pass
description = 'Container started.'
additional_settings = callback_handler.send_callback(
callback_type='started', state='success', description=description
)
meta_data = {
'application_container_id': settings['container_id'],
'task_id': additional_settings['task_id']
}
input_files = additional_settings['input_files']
result_files = additional_settings['result_files']
if len(input_files) != len(config['local_input_files']):
description = 'Number of local_input_files in config does not match input_files.'
callback_handler.send_callback(callback_type='files_retrieved', state='failed', description=description)
exit(5)
try:
ac_download(input_files, config['local_input_files'])
except:
description = 'Could not retrieve input files.'
callback_handler.send_callback(
callback_type='files_retrieved', state='failed', description=description, exception=format_exc()
)
exit(6)
description = 'Input files retrieved.'
callback_handler.send_callback(callback_type='files_retrieved', state='success', description=description)
telemetry_data = None
application_command = config['application_command']
try:
if additional_settings.get('parameters'):
if isinstance(additional_settings['parameters'], dict):
application_command = '{} \'{}\''.format(
application_command,
json.dumps(additional_settings['parameters'])
)
elif isinstance(additional_settings['parameters'], list):
application_command += ''.join([' {}'.format(val) for val in additional_settings['parameters']])
else:
raise Exception('Type of parameters not valid: {}'.format(type(additional_settings['parameters'])))
preexec_fn = None
if additional_settings.get('sandbox'):
from cc_container_worker.application_container.sandbox import Sandbox
sandbox = Sandbox(config=additional_settings.get('sandbox'))
preexec_fn = sandbox.enter
if additional_settings.get('tracing'):
from cc_container_worker.application_container.tracing import Tracing
if not os.path.exists(LOCAL_TRACING_FILE['dir']):
os.makedirs(LOCAL_TRACING_FILE['dir'])
local_tracing_file_path = os.path.join(LOCAL_TRACING_FILE['dir'], LOCAL_TRACING_FILE['name'])
sp = Popen(application_command, stdout=PIPE, stderr=PIPE, shell=True, preexec_fn=preexec_fn)
tracing = Tracing(sp.pid, config=additional_settings.get('tracing'), outfile=local_tracing_file_path)
tracing.start()
telemetry = Telemetry(sp, config=config)
t = Thread(target=telemetry.monitor)
t.start()
std_out, std_err = sp.communicate()
tracing.finish()
else:
sp = Popen(application_command, stdout=PIPE, stderr=PIPE, shell=True, preexec_fn=preexec_fn)
telemetry = Telemetry(sp, config=config)
t = Thread(target=telemetry.monitor)
t.start()
std_out, std_err = sp.communicate()
return_code = sp.returncode
# Collect telemetry data
telemetry_data = telemetry.result()
if std_out:
telemetry_data['std_out'] = str(std_out)
if std_err:
telemetry_data['std_err'] = str(std_err)
telemetry_data['return_code'] = return_code
except:
callback_handler.send_callback(
callback_type='processed', state='failed', description='Processing failed.', exception=format_exc()
)
exit(8)
description = 'Processing succeeded.'
state = 'success'
exception = None
if return_code != 0:
description = 'Processing failed.'
state = 'failed'
try:
if additional_settings.get('tracing'):
tracing_file = additional_settings['tracing'].get('tracing_file')
if tracing_file:
tracing_upload(tracing_file, LOCAL_TRACING_FILE, meta_data)
except:
if return_code != 0:
description = 'Processing failed and tracing file upload failed.'
else:
description = 'Tracing file upload failed.'
state = 'failed'
exception = format_exc()
callback_handler.send_callback(
callback_type='processed',
state=state,
description=description,
exception=exception,
telemetry=telemetry_data,
)
if return_code != 0:
exit(9)
try:
ac_upload(result_files, config['local_result_files'], meta_data)
except:
description = 'Could not send result files.'
callback_handler.send_callback(
callback_type='results_sent', state='failed', description=description, exception=format_exc()
)
exit(10)
callback_handler.send_callback(
callback_type='results_sent', state='success', description='Result files sent.'
)
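# Hedged illustration (not part of the worker): a standalone version of the `parameters`
# handling in main() above. A dict is appended as a single quoted JSON argument, a list as
# plain positional arguments; the command and values below are placeholders.
def _format_command(application_command, parameters):
    if isinstance(parameters, dict):
        return "{} '{}'".format(application_command, json.dumps(parameters))
    if isinstance(parameters, list):
        return application_command + ''.join(' {}'.format(val) for val in parameters)
    raise Exception('Type of parameters not valid: {}'.format(type(parameters)))

# _format_command('python run.py', {'threshold': 0.5})  -> python run.py '{"threshold": 0.5}'
# _format_command('python run.py', ['--fast', 3])       -> python run.py --fast 3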
if __name__ == '__main__':
main()
|
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import math
import scipy.stats as stats
def inter_p_value(p_value):
# interpretation of the evidence against H_0
if p_value < 0.01:
    inter_p = 'Overwhelming Evidence'
elif p_value < 0.05:
    inter_p = 'Strong Evidence'
elif p_value < 0.1:
    inter_p = 'Weak Evidence'
else:
    inter_p = 'No Evidence'
return inter_p
def grank(data):
if type(data) == np.ndarray or type(data) == list:
alldata = data.copy()
data = data.copy()
else:
alldata = data.values.copy()
data = data.values.copy()
alldata.sort()
tmp_df = pd.DataFrame({'value': alldata})
tmp_df['rank'] = tmp_df.index + 1
value_to_rank = tmp_df.groupby('value').mean().reset_index()
samp = pd.DataFrame({'value': data})
samp = pd.merge(samp, value_to_rank, how='left')
return samp['rank']
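# Hedged illustration (not part of the original module): grank() assigns tied values the
# average of the ranks they span, which matches scipy.stats.rankdata(method='average')
# when a sample is ranked against itself.
def _grank_example():
    sample = np.array([3, 1, 4, 1, 5])
    assert list(grank(sample)) == list(stats.rankdata(sample, method='average'))
    return grank(sample)  # 3.0, 1.5, 4.0, 1.5, 5.0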
def ranksum_z_test(df=None, to_compute='', alternative=None, precision=4, alpha=0.05):
"""
df can only have two columns and df.shape[0] > 10
alternative has three options: 'two-sided', 'less', 'greater'
"""
# sort all data points by values
tmp_values = df.values.reshape(-1)
tmp_values = tmp_values[~np.isnan(tmp_values)]
tmp_values.sort()
# assign ranks
updated_df = pd.DataFrame({'value': tmp_values})
updated_df['rank'] = updated_df.index + 1
# average rank for identical value
updated_df = updated_df.groupby('value').mean().reset_index()
# display(updated_df)
# Compute Sum of Ranks
samp1 = pd.DataFrame({'value': df[to_compute].dropna().values})
samp1 = pd.merge(samp1, updated_df)
T = samp1['rank'].sum()
# compute mean and standard deviation
n1 = df.iloc[:, 0].dropna().shape[0]
n2 = df.iloc[:, 1].dropna().shape[0]
E_T = n1*(n1+n2+1)/2
sigmaT = (n1*n2*(n1+n2+1)/12) ** 0.5
z = (T-E_T)/sigmaT
# compute p-value
# right (greater)
p_value = 1 - stats.norm.cdf(z)
if alternative == 'greater':
pass
elif alternative == 'less':
p_value = stats.norm.cdf(z)
elif alternative == 'two-sided':
# two-tail
if p_value > 0.5:
p_value = stats.norm.cdf(z)
p_value *= 2
flag = False
if p_value < alpha:
flag = True
result = f'''======= z-test =======
T (sum of ranks) = {T}
(n1, n2) = ({n1}, {n2})
mu_t = {E_T}
sigma_t = {sigmaT}
z statistic value (observed) = {z:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 ({alternative}) → {flag}
'''
print(result)
result_dict = {'T': T, 'ET': E_T,
'sigmaT': sigmaT, 'z': z, 'p-value': p_value}
return updated_df, result_dict
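# Hedged usage sketch (not part of the original module): two independent samples as the
# columns of a DataFrame; the column names and values below are placeholders.
def _ranksum_example():
    df = pd.DataFrame({
        'machine_a': [22, 25, 27, 30, 31, 33, 35, 36, 38, 40, 41],
        'machine_b': [18, 19, 21, 23, 24, 26, 28, 29, 32, 34, 37],
    })
    _, res = ranksum_z_test(df=df, to_compute='machine_a', alternative='two-sided')
    return res  # keys: 'T', 'ET', 'sigmaT', 'z', 'p-value'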
def sign_binom_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05):
n = diff.size - np.sum(diff == 0)
if sign == '+':
sign_count = np.sum(diff > 0)
else:
sign_count = np.sum(diff < 0)
if alternative == 'greater' or alternative == 'less':
# if the sign count exceeds half of n, switch to the upper tail
if sign_count > n / 2:
p_value = 1 - stats.binom.cdf(sign_count - 1, n=n, p=0.5)
else:
p_value = stats.binom.cdf(sign_count, n=n, p=0.5)
elif alternative == 'two-sided':
p_value = stats.binom.cdf(sign_count, n=n, p=0.5)
if p_value > 0.5:
p_value = 1 - stats.binom.cdf(sign_count - 1, n=n, p=0.5)
p_value *= 2
flag = False
if p_value < alpha:
flag = True
result = f'''======= Sign Test - Binomial Distribution =======
(For small sample size (<= 10))
Targeted Sign: {sign}
n = {n}
Sign counts = {sign_count}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 ({alternative}) → {flag}
'''
print(result)
return sign_count, p_value
def sign_z_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05):
diff = diff[~(diff == 0)]
n = len(diff)
if sign == '+':
T = np.sum(diff > 0)
else:
T = np.sum(diff < 0)
z_stat = (T - 0.5 * n) / (.5 * (n ** 0.5))
# right tail
if alternative == 'greater':
p_value = 1 - stats.norm.cdf(z_stat)
elif alternative == 'less':
p_value = stats.norm.cdf(z_stat)
elif alternative == 'two-sided':
p_value = 1 - stats.norm.cdf(z_stat)
if p_value > 0.5:
p_value = stats.norm.cdf(z_stat)
p_value *= 2
flag = False
if p_value < alpha:
flag = True
result = f'''======= Sign Test - z Statistic =======
(For large sample size (> 10))
Targeted Sign: {sign}
n = {n}
Sign counts = {T}
z statistic = {z_stat:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 ({alternative}) → {flag}
'''
print(result)
return T, p_value
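# Hedged usage sketch (illustrative only; the paired differences are hypothetical):
# large-sample sign test using the normal approximation (n > 10 after dropping zeros).
def _sign_z_test_demo():
    diff = np.array([2, -1, 3, 1, -2, 4, 1, 2, -1, 3, 2, 1, -3, 2, 1, 0, 2, -1, 1, 2])
    return sign_z_test(diff=diff, sign='+', alternative='two-sided')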
def wilcoxon_signed_ranksum_z_test(diff=None, sign='+', alternative=None, precision=4, alpha=0.05):
diff = diff[~(diff == 0)]
n = len(diff)
diff_abs = np.sort(np.abs(diff).to_numpy())
updated_diff = pd.DataFrame({'diff_abs': diff_abs})
updated_diff['rank'] = updated_diff.index + 1
updated_diff = updated_diff.groupby('diff_abs').mean().reset_index()
new_df = pd.DataFrame({'diff': diff, 'diff_abs': np.abs(diff)})
new_df = pd.merge(new_df, updated_diff)
if sign == '+':
T = np.sum(new_df['rank'][new_df['diff'] > 0])
else:
T = np.sum(new_df['rank'][new_df['diff'] < 0])
E_T = n * (n + 1) / 4
sigma_T = (n * (n + 1) * (2 * n + 1) / 24) ** 0.5
z_stat = (T - E_T) / sigma_T
if alternative == 'greater':
# right tail test
p_value = 1 - stats.norm.cdf(z_stat)
elif alternative == 'less':
# left tail test
p_value = stats.norm.cdf(z_stat)
elif alternative == 'two-sided':
# two-tailed test
p_value = 1 - stats.norm.cdf(z_stat)
if p_value > 0.5:
p_value = stats.norm.cdf(z_stat)
p_value *= 2
flag = False
if p_value < alpha:
flag = True
result = f'''======= Wilcoxon Signed Rank Sum Test - z Statistic =======
(For large sample size (> 30))
Targeted Sign: {sign}
n = {n}
Sum of rank (T statistic) = {T}
mu_t = {E_T}
sigma_t = {sigma_T}
z statistic value (observed) = {z_stat:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 ({alternative}) → {flag}
'''
print(result)
result_dict = {'n': n, 'T': T, 'E_T': E_T,
'sigma_T': sigma_T, 'z_stat': z_stat, 'p_value': p_value}
return new_df, result_dict
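# Hedged usage sketch (illustrative only; the paired differences are hypothetical):
# Wilcoxon signed rank sum z-test on more than 30 non-zero differences, passed as a
# pandas Series because the function calls .to_numpy() on the absolute differences.
def _wilcoxon_signed_ranksum_demo():
    diff = pd.Series([4, -2, 5, 3, -1, 6, 2, -3, 7, 1, 4, -5, 2, 3, -2,
                      6, 1, -4, 5, 2, 3, -1, 4, 2, -6, 3, 5, -2, 1, 4, 2])
    return wilcoxon_signed_ranksum_z_test(diff=diff, sign='+', alternative='two-sided')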
def kruskal_chi2_test(data=None, alpha=0.05, precision=4):
"""
    columns = the targets (treatments) to compare
    rows = data for each target
"""
if type(data) == pd.DataFrame:
data = data.copy().to_numpy()
alldata = np.concatenate(data.copy())
else:
alldata = np.concatenate(data.copy())
k = data.shape[1]
alldata.sort()
tmp_df = pd.DataFrame(({'value': alldata}))
tmp_df['rank'] = tmp_df.index + 1 # rank
value_to_rank = tmp_df.groupby('value').mean().reset_index()
T = []
sample_rank_df = []
for i in range(k):
samp = pd.DataFrame(
{'value': data[:, i][~np.isnan(data[:, i])]})
samp = pd.merge(samp, value_to_rank)
sample_rank_df.append(samp)
T.append(samp['rank'].sum())
n = [len(data[:, i][~np.isnan(data[:, i])]) for i in range(k)]
# print(T)
# print(n)
rule_of_five_str = ""
if (np.sum(np.array(n) < 5) > 0):
rule_of_five_str += "!(At least one sample size is less than 5)"
else:
rule_of_five_str += "(All sample size >= 5)"
N = np.sum(n)
t_over_n = 0
for i in range(k):
t_over_n += T[i] ** 2 / n[i]
H = 12 / N / (N + 1) * t_over_n - 3 * (N + 1)
p_value = 1 - stats.chi2.cdf(H, k - 1)
chi2_stat = stats.chi2.ppf(1 - alpha, k - 1)
result_dict = {'H': H, 'p-value': p_value,
'T': T, 'sample_rank_df': sample_rank_df}
flag = p_value < alpha
result = f'''======= Kruskal-Wallis Test with Chi-squared Test =======
{rule_of_five_str}
H statistic value (observed) = {H:.{precision}f}
chi2 critical value = {chi2_stat:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 (Not all {k} population locations are the same) → {flag}
'''
print(result)
return result_dict
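# Hedged usage sketch (illustrative only; the three samples are hypothetical): each
# column is one population to compare, each with at least 5 observations.
def _kruskal_chi2_demo():
    demo_df = pd.DataFrame({'method_1': [27, 31, 29, 35, 33, 30],
                            'method_2': [25, 28, 26, 24, 29, 27],
                            'method_3': [32, 36, 34, 38, 31, 35]})
    return kruskal_chi2_test(data=demo_df)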
def friedman_chi2_test(data=None, alpha=0.05, precision=4):
"""
    columns = the targets (treatments) to compare
    rows = blocked data for each target
"""
if type(data) == np.ndarray:
data = pd.DataFrame(data)
new_df = data.apply(grank, axis=1)
b, k = new_df.shape
rule_of_five_str = ""
if (b < 5 and k < 5):
rule_of_five_str += f"!(Number of blocks = {b} < 5 and number of populations = {k} < 5)"
else:
rule_of_five_str += f"(Number of blocks = {b} >= 5 or number of populations {k} >= 5)"
T = new_df.sum().to_numpy()
F_r = 12 / b / k / (k + 1) * np.sum(T ** 2) - 3 * b * (k + 1)
p_value = 1 - stats.chi2.cdf(F_r, k - 1)
chi2_stat = stats.chi2.ppf(1 - alpha, k - 1)
result_dict = {'F_r': F_r, 'p-value': p_value,
'T': T, 'sample_ranked_df': new_df}
flag = p_value < alpha
result = f'''======= Friedman Test with Chi-squared Test =======
{rule_of_five_str}
F_r statistic value (observed) = {F_r:.{precision}f}
chi2 critical value = {chi2_stat:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 (Not all {k} population locations are the same) → {flag}
'''
print(result)
return result_dict
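# Hedged usage sketch (illustrative only; the scores are hypothetical): each row is a
# block (e.g. one rater) and each column a treatment; ranks are taken within rows.
def _friedman_chi2_demo():
    demo_df = pd.DataFrame({'brand_A': [4, 3, 4, 5, 3, 4, 5],
                            'brand_B': [3, 2, 3, 4, 2, 3, 4],
                            'brand_C': [5, 4, 5, 5, 4, 5, 5]})
    return friedman_chi2_test(data=demo_df)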
def pearson_test(data=None, a=None, b=None, alpha=0.05, precision=4):
"""
    The a and b parameters are not supported yet.
Make sure that data is in the form of [a, b]
"""
cov_mat = np.cov(data.values, rowvar=False)
cor_mat = np.corrcoef(data.values, rowvar=False)
cov = cov_mat[0][1]
cor = cor_mat[0][1]
n = data.shape[0]
d_of_f = n - 2
t_c = stats.t.ppf(1 - alpha / 2, df=d_of_f)
t_stat = cor * (((n - 2) / (1 - cor ** 2)) ** 0.5)
flag = abs(t_stat) > t_c
result_dict = {'cov': cov, 't_stat': t_stat, 'cor': cor, 't_c': t_c}
results = f"""======= Pearson Correlation Coefficient =======
Covariance: {cov:.{precision}f}
Coefficient of Correlation: {cor:.{precision}f}
t (Critical Value) = {t_c:.{precision}f}
t (Observed Value) = {t_stat:.{precision}f}
Reject H_0 (There is a linear relationship between the two variables) → {flag}
"""
print(results)
return result_dict
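# Hedged usage sketch (illustrative only; the values are hypothetical): the two-column
# DataFrame is passed as [a, b], matching the docstring's expected layout.
def _pearson_test_demo():
    demo_df = pd.DataFrame({'hours_studied': [2, 4, 5, 7, 8, 10, 11, 13],
                            'exam_score': [52, 58, 61, 66, 70, 75, 79, 84]})
    return pearson_test(data=demo_df)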
def spearman_test(a=None, b=None, alpha=0.05, precision=4):
    spearman_result_cor, spearman_result_p_value = stats.spearmanr(a, b)
    # print(f'Correlation = {cor:.4f}, p-value={p_value:.4f}')
    n = len(a)
    rule_of_30_str = ''
    results = f"""======= Spearman Rank Correlation Coefficient =======
[scipy.stats.spearmanr]
Coefficient of Correlation: {spearman_result_cor:.{precision}f}
p-value={spearman_result_p_value:.{precision}f} ({inter_p_value(spearman_result_p_value)})
"""
    if (n < 30):
        rule_of_30_str += f"!(n = {n} < 30)"
        flag = spearman_result_p_value < alpha
        results += f"""
Reject H_0 (There is a relationship between the two variables) → {flag}
"""
        result_dict = {'spearman_result': [
            spearman_result_cor, spearman_result_p_value]}
    else:
        rule_of_30_str += f"(n = {n} >= 30)"
        flag = spearman_result_p_value < alpha
        results += f"""
Reject H_0 (There is a relationship between the two variables) → {flag}
"""
        z_stat = spearman_result_cor * ((n - 1) ** 0.5)
        z_cv = stats.norm.ppf(1 - alpha/2)
        p_value = stats.norm.sf(z_stat) * 2
        if p_value > 1:
            p_value = stats.norm.cdf(z_stat) * 2
        flag = p_value < alpha
        results += f"""
[z test statistic]
{rule_of_30_str}
r_s: {spearman_result_cor:.{precision}f} (using spearmanr's result)
z stat (observed value) = {z_stat:.{precision}f}
z (critical value) = {z_cv:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 (There is a relationship between the two variables) → {flag}
"""
        result_dict = {'spearman_result': [
            spearman_result_cor, spearman_result_p_value], 'z_stat': z_stat, 'z_cv': z_cv, 'p-value': p_value}
    print(results)
    return result_dict
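# Hedged usage sketch (illustrative only; the data are randomly generated): with
# n >= 30 the function also reports the z-test approximation for the rank correlation.
def _spearman_test_demo():
    rng = np.random.default_rng(7)
    a = rng.normal(size=35)
    b = 0.6 * a + rng.normal(scale=0.5, size=35)
    return spearman_test(a=a, b=b)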
|
#!/usr/bin/env python
#####################################
# Installation module for gpp-decrypt
#####################################
# AUTHOR OF MODULE NAME
AUTHOR="Larry Spohn (Spoonman)"
# DESCRIPTION OF THE MODULE
DESCRIPTION="This module will install/upgrade gpp-decrypt - a tool for decrypting passwords found in Group Policy Preferences (GPP)"
# INSTALL TYPE GIT, SVN, FILE DOWNLOAD
# OPTIONS = GIT, SVN, FILE
INSTALL_TYPE="GIT"
# LOCATION OF THE FILE OR GIT/SVN REPOSITORY
REPOSITORY_LOCATION="https://github.com/SecurityToolsArchive/gpp-decrypt"
# WHERE DO YOU WANT TO INSTALL IT
INSTALL_LOCATION="gpp-decrypt"
# DEPENDS FOR DEBIAN INSTALLS
DEBIAN="git"
# DEPENDS FOR FEDORA INSTALLS
FEDORA="git"
# COMMANDS TO RUN AFTER
AFTER_COMMANDS=""
# THIS WILL CREATE AN AUTOMATIC LAUNCHER FOR THE TOOL
LAUNCHER=""
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/option-k.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os.path
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.subdir('work1', 'work2', 'work3')
test.write('succeed.py', r"""
import sys
file = open(sys.argv[1], 'wb')
file.write("succeed.py: %s\n" % sys.argv[1])
file.close()
sys.exit(0)
""")
test.write('fail.py', r"""
import sys
sys.exit(1)
""")
#
# Test: work1
#
test.write(['work1', 'SConstruct'], """\
Succeed = Builder(action = r'%(_python_)s ../succeed.py $TARGETS')
Fail = Builder(action = r'%(_python_)s ../fail.py $TARGETS')
env = Environment(BUILDERS = { 'Succeed' : Succeed, 'Fail' : Fail })
env.Fail(target = 'aaa.1', source = 'aaa.in')
env.Succeed(target = 'aaa.out', source = 'aaa.1')
env.Succeed(target = 'bbb.out', source = 'bbb.in')
""" % locals())
test.write(['work1', 'aaa.in'], "aaa.in\n")
test.write(['work1', 'bbb.in'], "bbb.in\n")
test.run(chdir = 'work1',
arguments = 'aaa.out bbb.out',
stderr = 'scons: *** [aaa.1] Error 1\n',
status = 2)
test.must_not_exist(test.workpath('work1', 'aaa.1'))
test.must_not_exist(test.workpath('work1', 'aaa.out'))
test.must_not_exist(test.workpath('work1', 'bbb.out'))
test.run(chdir = 'work1',
arguments = '-k aaa.out bbb.out',
stderr = 'scons: *** [aaa.1] Error 1\n',
status = 2)
test.must_not_exist(test.workpath('work1', 'aaa.1'))
test.must_not_exist(test.workpath('work1', 'aaa.out'))
test.must_match(['work1', 'bbb.out'], "succeed.py: bbb.out\n")
test.unlink(['work1', 'bbb.out'])
test.run(chdir = 'work1',
arguments = '--keep-going aaa.out bbb.out',
stderr = 'scons: *** [aaa.1] Error 1\n',
status = 2)
test.must_not_exist(test.workpath('work1', 'aaa.1'))
test.must_not_exist(test.workpath('work1', 'aaa.out'))
test.must_match(['work1', 'bbb.out'], "succeed.py: bbb.out\n")
expect = """\
scons: Reading SConscript files ...
scons: done reading SConscript files.
scons: Cleaning targets ...
Removed bbb.out
scons: done cleaning targets.
"""
test.run(chdir = 'work1',
arguments = '--clean --keep-going aaa.out bbb.out',
stdout = expect)
test.must_not_exist(test.workpath('work1', 'aaa.1'))
test.must_not_exist(test.workpath('work1', 'aaa.out'))
test.must_not_exist(test.workpath('work1', 'bbb.out'))
#
# Test: work2
#
test.write(['work2', 'SConstruct'], """\
Succeed = Builder(action = r'%(_python_)s ../succeed.py $TARGETS')
Fail = Builder(action = r'%(_python_)s ../fail.py $TARGETS')
env = Environment(BUILDERS = { 'Succeed' : Succeed, 'Fail' : Fail })
env.Fail('aaa.out', 'aaa.in')
env.Succeed('bbb.out', 'aaa.out')
env.Succeed('ccc.out', 'ccc.in')
env.Succeed('ddd.out', 'ccc.in')
""" % locals())
test.write(['work2', 'aaa.in'], "aaa.in\n")
test.write(['work2', 'ccc.in'], "ccc.in\n")
test.run(chdir = 'work2',
arguments = '-k .',
status = 2,
stderr = None,
stdout = """\
scons: Reading SConscript files ...
scons: done reading SConscript files.
scons: Building targets ...
%(_python_)s ../fail.py aaa.out
%(_python_)s ../succeed.py ccc.out
%(_python_)s ../succeed.py ddd.out
scons: done building targets (errors occurred during build).
""" % locals())
test.must_not_exist(['work2', 'aaa.out'])
test.must_not_exist(['work2', 'bbb.out'])
test.must_match(['work2', 'ccc.out'], "succeed.py: ccc.out\n")
test.must_match(['work2', 'ddd.out'], "succeed.py: ddd.out\n")
#
# Test: work3
#
# Check that the -k (keep-going) switch works correctly when the Nodes
# forms a DAG. The test case is the following
#
# all
# |
# +-----+-----+-------------+
# | | |
# a1 a2 a3
# | | |
# + +---+---+ +---+---+
# \ | / | |
# \ bbb.out / a4 ccc.out
# \ / /
# \ / /
# \ / /
# aaa.out (fails)
#
test.write(['work3', 'SConstruct'], """\
Succeed = Builder(action = r'%(_python_)s ../succeed.py $TARGETS')
Fail = Builder(action = r'%(_python_)s ../fail.py $TARGETS')
env = Environment(BUILDERS = { 'Succeed' : Succeed, 'Fail' : Fail })
a = env.Fail('aaa.out', 'aaa.in')
b = env.Succeed('bbb.out', 'bbb.in')
c = env.Succeed('ccc.out', 'ccc.in')
a1 = Alias( 'a1', a )
a2 = Alias( 'a2', a+b)
a4 = Alias( 'a4', c)
a3 = Alias( 'a3', a4+c)
Alias('all', a1+a2+a3)
""" % locals())
test.write(['work3', 'aaa.in'], "aaa.in\n")
test.write(['work3', 'bbb.in'], "bbb.in\n")
test.write(['work3', 'ccc.in'], "ccc.in\n")
# Test regular build (i.e. without -k)
test.run(chdir = 'work3',
arguments = '.',
status = 2,
stderr = None,
stdout = """\
scons: Reading SConscript files ...
scons: done reading SConscript files.
scons: Building targets ...
%(_python_)s ../fail.py aaa.out
scons: building terminated because of errors.
""" % locals())
test.must_not_exist(['work3', 'aaa.out'])
test.must_not_exist(['work3', 'bbb.out'])
test.must_not_exist(['work3', 'ccc.out'])
test.run(chdir = 'work3',
arguments = '-c .')
test.must_not_exist(['work3', 'aaa.out'])
test.must_not_exist(['work3', 'bbb.out'])
test.must_not_exist(['work3', 'ccc.out'])
# Current directory
test.run(chdir = 'work3',
arguments = '-k .',
status = 2,
stderr = None,
stdout = """\
scons: Reading SConscript files ...
scons: done reading SConscript files.
scons: Building targets ...
%(_python_)s ../fail.py aaa.out
%(_python_)s ../succeed.py bbb.out
%(_python_)s ../succeed.py ccc.out
scons: done building targets (errors occurred during build).
""" % locals())
test.must_not_exist(['work3', 'aaa.out'])
test.must_exist(['work3', 'bbb.out'])
test.must_exist(['work3', 'ccc.out'])
test.run(chdir = 'work3',
arguments = '-c .')
test.must_not_exist(['work3', 'aaa.out'])
test.must_not_exist(['work3', 'bbb.out'])
test.must_not_exist(['work3', 'ccc.out'])
# Single target
test.run(chdir = 'work3',
arguments = '--keep-going all',
status = 2,
stderr = None,
stdout = """\
scons: Reading SConscript files ...
scons: done reading SConscript files.
scons: Building targets ...
%(_python_)s ../fail.py aaa.out
%(_python_)s ../succeed.py bbb.out
%(_python_)s ../succeed.py ccc.out
scons: done building targets (errors occurred during build).
""" % locals())
test.must_not_exist(['work3', 'aaa.out'])
test.must_exist(['work3', 'bbb.out'])
test.must_exist(['work3', 'ccc.out'])
test.run(chdir = 'work3',
arguments = '-c .')
test.must_not_exist(['work3', 'aaa.out'])
test.must_not_exist(['work3', 'bbb.out'])
test.must_not_exist(['work3', 'ccc.out'])
# Separate top-level targets
test.run(chdir = 'work3',
arguments = '-k a1 a2 a3',
status = 2,
stderr = None,
stdout = """\
scons: Reading SConscript files ...
scons: done reading SConscript files.
scons: Building targets ...
%(_python_)s ../fail.py aaa.out
%(_python_)s ../succeed.py bbb.out
%(_python_)s ../succeed.py ccc.out
scons: done building targets (errors occurred during build).
""" % locals())
test.must_not_exist(['work3', 'aaa.out'])
test.must_exist(['work3', 'bbb.out'])
test.must_exist(['work3', 'ccc.out'])
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
import inspect
from abc import ABCMeta, abstractmethod
from bottle import PluginError, request
from conans.util.log import logger
class AuthorizationHeader(object):
""" Generic plugin to handle Authorization header. Must be extended and implement
some abstract methods in subclasses """
__metaclass__ = ABCMeta
name = 'authorizationheader'
api = 2
def __init__(self, keyword):
# Required
self.keyword = keyword
def setup(self, app):
""" Make sure that other installed plugins don't affect the same
keyword argument. """
for other in app.plugins:
if not isinstance(other, self.__class__):
continue
if other.keyword == self.keyword:
raise PluginError("Found another AuthorizationHeaderBottlePlugin plugin with "
"conflicting settings (non-unique keyword).")
def apply(self, callback, context):
""" Test if the original callback accepts a 'self.keyword' keyword. """
args = inspect.getargspec(context.callback)[0]
logger.debug("Call: %s" % str(callback))
if self.keyword not in args:
return callback
def wrapper(*args, **kwargs):
""" Check for user credentials in http header """
# Get Authorization
header_value = self.get_authorization_header_value()
new_kwargs = self.parse_authorization_value(header_value)
if not new_kwargs:
raise self.get_invalid_header_response()
kwargs.update(new_kwargs)
return callback(*args, **kwargs) # kwargs has :xxx variables from url
# Replace the route callback with the wrapped one.
return wrapper
def get_authorization_header_value(self):
""" Get from the request the header of http basic auth:
http://en.wikipedia.org/wiki/Basic_access_authentication """
auth_type = self.get_authorization_type()
if request.headers.get("Authorization", None) is not None:
auth_line = request.headers.get("Authorization", None)
if not auth_line.startswith("%s " % auth_type):
raise self.get_invalid_header_response()
return auth_line[len(auth_type) + 1:]
else:
return None
@abstractmethod
def get_authorization_type(self):
"""Abstract. Example: Basic (for http basic auth) or Beagle for JWT"""
raise NotImplementedError()
@abstractmethod
def parse_authorization_value(self, header_value):
"""Abstract. Parse header_value and return kwargs to apply bottle
method parameters"""
raise NotImplementedError()
@abstractmethod
def get_invalid_header_response(self):
"""A response from a malformed header"""
raise NotImplementedError()
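# Hedged usage sketch (illustrative only, not part of the original module): a minimal
# subclass handling a hypothetical "Token <value>" Authorization header and injecting
# the raw header value into the wrapped callback's `token` keyword argument.
class _ExampleTokenAuthPlugin(AuthorizationHeader):
    def __init__(self):
        super(_ExampleTokenAuthPlugin, self).__init__(keyword="token")

    def get_authorization_type(self):
        return "Token"

    def parse_authorization_value(self, header_value):
        # A real plugin would validate/decode the value instead of passing it through.
        return {"token": header_value} if header_value else None

    def get_invalid_header_response(self):
        from bottle import HTTPError
        return HTTPError(401, "Invalid or missing Authorization header")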
|
""" core implementation of testing process: init, session, runtest loop. """
import re
import py
import pytest, _pytest
import os, sys, imp
try:
from collections import MutableMapping as MappingMixin
except ImportError:
from UserDict import DictMixin as MappingMixin
from _pytest.runner import collect_one_node
tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
# exitcodes for the command line
EXIT_OK = 0
EXIT_TESTSFAILED = 1
EXIT_INTERRUPTED = 2
EXIT_INTERNALERROR = 3
EXIT_USAGEERROR = 4
name_re = re.compile("^[a-zA-Z_]\w*$")
def pytest_addoption(parser):
parser.addini("norecursedirs", "directory patterns to avoid for recursion",
type="args", default=['.*', 'CVS', '_darcs', '{arch}', '*.egg'])
#parser.addini("dirpatterns",
# "patterns specifying possible locations of test files",
# type="linelist", default=["**/test_*.txt",
# "**/test_*.py", "**/*_test.py"]
#)
group = parser.getgroup("general", "running and selection options")
group._addoption('-x', '--exitfirst', action="store_true", default=False,
dest="exitfirst",
help="exit instantly on first error or failed test."),
group._addoption('--maxfail', metavar="num",
action="store", type=int, dest="maxfail", default=0,
help="exit after first num failures or errors.")
group._addoption('--strict', action="store_true",
help="run pytest in strict mode, warnings become errors.")
group._addoption("-c", metavar="file", type=str, dest="inifilename",
help="load configuration from `file` instead of trying to locate one of the implicit configuration files.")
group = parser.getgroup("collect", "collection")
group.addoption('--collectonly', '--collect-only', action="store_true",
help="only collect tests, don't execute them."),
group.addoption('--pyargs', action="store_true",
help="try to interpret all arguments as python packages.")
group.addoption("--ignore", action="append", metavar="path",
help="ignore path during collection (multi-allowed).")
# when changing this to --conf-cut-dir, config.py Conftest.setinitial
# needs upgrading as well
group.addoption('--confcutdir', dest="confcutdir", default=None,
metavar="dir",
help="only load conftest.py's relative to specified dir.")
group = parser.getgroup("debugconfig",
"test session debugging and configuration")
group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
help="base temporary directory for this test run.")
def pytest_namespace():
collect = dict(Item=Item, Collector=Collector, File=File, Session=Session)
return dict(collect=collect)
def pytest_configure(config):
    pytest.config = config # compatibility
if config.option.exitfirst:
config.option.maxfail = 1
def wrap_session(config, doit):
"""Skeleton command line program"""
session = Session(config)
session.exitstatus = EXIT_OK
initstate = 0
try:
try:
config.do_configure()
initstate = 1
config.hook.pytest_sessionstart(session=session)
initstate = 2
doit(config, session)
except pytest.UsageError:
args = sys.exc_info()[1].args
for msg in args:
sys.stderr.write("ERROR: %s\n" %(msg,))
session.exitstatus = EXIT_USAGEERROR
except KeyboardInterrupt:
excinfo = py.code.ExceptionInfo()
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
session.exitstatus = EXIT_INTERRUPTED
except:
excinfo = py.code.ExceptionInfo()
config.notify_exception(excinfo, config.option)
session.exitstatus = EXIT_INTERNALERROR
if excinfo.errisinstance(SystemExit):
sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
else:
if session._testsfailed:
session.exitstatus = EXIT_TESTSFAILED
finally:
excinfo = None # Explicitly break reference cycle.
session.startdir.chdir()
if initstate >= 2:
config.hook.pytest_sessionfinish(
session=session,
exitstatus=session.exitstatus)
if initstate >= 1:
config.do_unconfigure()
config.pluginmanager.ensure_shutdown()
return session.exitstatus
def pytest_cmdline_main(config):
return wrap_session(config, _main)
def _main(config, session):
""" default command line protocol for initialization, session,
running tests and reporting. """
config.hook.pytest_collection(session=session)
config.hook.pytest_runtestloop(session=session)
def pytest_collection(session):
return session.perform_collect()
def pytest_runtestloop(session):
if session.config.option.collectonly:
return True
def getnextitem(i):
# this is a function to avoid python2
# keeping sys.exc_info set when calling into a test
# python2 keeps sys.exc_info till the frame is left
try:
return session.items[i+1]
except IndexError:
return None
for i, item in enumerate(session.items):
nextitem = getnextitem(i)
item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
if session.shouldstop:
raise session.Interrupted(session.shouldstop)
return True
def pytest_ignore_collect(path, config):
p = path.dirpath()
ignore_paths = config._getconftest_pathlist("collect_ignore", path=p)
ignore_paths = ignore_paths or []
excludeopt = config.getoption("ignore")
if excludeopt:
ignore_paths.extend([py.path.local(x) for x in excludeopt])
return path in ignore_paths
class FSHookProxy(object):
def __init__(self, fspath, config):
self.fspath = fspath
self.config = config
def __getattr__(self, name):
plugins = self.config._getmatchingplugins(self.fspath)
x = self.config.hook._getcaller(name, plugins)
self.__dict__[name] = x
return x
def compatproperty(name):
def fget(self):
# deprecated - use pytest.name
return getattr(pytest, name)
return property(fget)
class NodeKeywords(MappingMixin):
def __init__(self, node):
self.node = node
self.parent = node.parent
self._markers = {node.name: True}
def __getitem__(self, key):
try:
return self._markers[key]
except KeyError:
if self.parent is None:
raise
return self.parent.keywords[key]
def __setitem__(self, key, value):
self._markers[key] = value
def __delitem__(self, key):
raise ValueError("cannot delete key in keywords dict")
def __iter__(self):
seen = set(self._markers)
if self.parent is not None:
seen.update(self.parent.keywords)
return iter(seen)
def __len__(self):
return len(self.__iter__())
def keys(self):
return list(self)
def __repr__(self):
return "<NodeKeywords for node %s>" % (self.node, )
class Node(object):
""" base class for Collector and Item the test collection tree.
Collector subclasses have children, Items are terminal nodes."""
def __init__(self, name, parent=None, config=None, session=None):
#: a unique name within the scope of the parent node
self.name = name
#: the parent collector node.
self.parent = parent
#: the pytest config object
self.config = config or parent.config
#: the session this node is part of
self.session = session or parent.session
#: filesystem path where this node was collected from (can be None)
self.fspath = getattr(parent, 'fspath', None)
#: keywords/markers collected from all scopes
self.keywords = NodeKeywords(self)
#: allow adding of extra keywords to use for matching
self.extra_keyword_matches = set()
# used for storing artificial fixturedefs for direct parametrization
self._name2pseudofixturedef = {}
#self.extrainit()
@property
def ihook(self):
""" fspath sensitive hook proxy used to call pytest hooks"""
return self.session.gethookproxy(self.fspath)
#def extrainit(self):
# """"extra initialization after Node is initialized. Implemented
# by some subclasses. """
Module = compatproperty("Module")
Class = compatproperty("Class")
Instance = compatproperty("Instance")
Function = compatproperty("Function")
File = compatproperty("File")
Item = compatproperty("Item")
def _getcustomclass(self, name):
cls = getattr(self, name)
if cls != getattr(pytest, name):
py.log._apiwarn("2.0", "use of node.%s is deprecated, "
"use pytest_pycollect_makeitem(...) to create custom "
"collection nodes" % name)
return cls
def __repr__(self):
return "<%s %r>" %(self.__class__.__name__,
getattr(self, 'name', None))
def warn(self, code, message):
""" generate a warning with the given code and message for this
item. """
assert isinstance(code, str)
fslocation = getattr(self, "location", None)
if fslocation is None:
fslocation = getattr(self, "fspath", None)
else:
fslocation = "%s:%s" % fslocation[:2]
self.ihook.pytest_logwarning(code=code, message=message,
nodeid=self.nodeid,
fslocation=fslocation)
# methods for ordering nodes
@property
def nodeid(self):
""" a ::-separated string denoting its collection tree address. """
try:
return self._nodeid
except AttributeError:
self._nodeid = x = self._makeid()
return x
def _makeid(self):
return self.parent.nodeid + "::" + self.name
def __hash__(self):
return hash(self.nodeid)
def setup(self):
pass
def teardown(self):
pass
def _memoizedcall(self, attrname, function):
exattrname = "_ex_" + attrname
failure = getattr(self, exattrname, None)
if failure is not None:
py.builtin._reraise(failure[0], failure[1], failure[2])
if hasattr(self, attrname):
return getattr(self, attrname)
try:
res = function()
except py.builtin._sysex:
raise
except:
failure = sys.exc_info()
setattr(self, exattrname, failure)
raise
setattr(self, attrname, res)
return res
def listchain(self):
""" return list of all parent collectors up to self,
starting from root of collection tree. """
chain = []
item = self
while item is not None:
chain.append(item)
item = item.parent
chain.reverse()
return chain
def add_marker(self, marker):
""" dynamically add a marker object to the node.
``marker`` can be a string or pytest.mark.* instance.
"""
from _pytest.mark import MarkDecorator
if isinstance(marker, py.builtin._basestring):
marker = MarkDecorator(marker)
elif not isinstance(marker, MarkDecorator):
raise ValueError("is not a string or pytest.mark.* Marker")
self.keywords[marker.name] = marker
def get_marker(self, name):
""" get a marker object from this node or None if
the node doesn't have a marker with that name. """
val = self.keywords.get(name, None)
if val is not None:
from _pytest.mark import MarkInfo, MarkDecorator
if isinstance(val, (MarkDecorator, MarkInfo)):
return val
def listextrakeywords(self):
""" Return a set of all extra keywords in self and any parents."""
extra_keywords = set()
item = self
for item in self.listchain():
extra_keywords.update(item.extra_keyword_matches)
return extra_keywords
def listnames(self):
return [x.name for x in self.listchain()]
def getplugins(self):
return self.config._getmatchingplugins(self.fspath)
def addfinalizer(self, fin):
""" register a function to be called when this node is finalized.
This method can only be called when this node is active
in a setup chain, for example during self.setup().
"""
self.session._setupstate.addfinalizer(fin, self)
def getparent(self, cls):
""" get the next parent node (including ourself)
which is an instance of the given class"""
current = self
while current and not isinstance(current, cls):
current = current.parent
return current
def _prunetraceback(self, excinfo):
pass
def _repr_failure_py(self, excinfo, style=None):
fm = self.session._fixturemanager
if excinfo.errisinstance(fm.FixtureLookupError):
return excinfo.value.formatrepr()
tbfilter = True
if self.config.option.fulltrace:
style="long"
else:
self._prunetraceback(excinfo)
tbfilter = False # prunetraceback already does it
if style == "auto":
style = "long"
# XXX should excinfo.getrepr record all data and toterminal() process it?
if style is None:
if self.config.option.tbstyle == "short":
style = "short"
else:
style = "long"
return excinfo.getrepr(funcargs=True,
showlocals=self.config.option.showlocals,
style=style, tbfilter=tbfilter)
repr_failure = _repr_failure_py
class Collector(Node):
""" Collector instances create children through collect()
and thus iteratively build a tree.
"""
class CollectError(Exception):
""" an error during collection, contains a custom message. """
def collect(self):
""" returns a list of children (items and collectors)
for this collection node.
"""
raise NotImplementedError("abstract")
def repr_failure(self, excinfo):
""" represent a collection failure. """
if excinfo.errisinstance(self.CollectError):
exc = excinfo.value
return str(exc.args[0])
return self._repr_failure_py(excinfo, style="short")
def _memocollect(self):
""" internal helper method to cache results of calling collect(). """
return self._memoizedcall('_collected', lambda: list(self.collect()))
def _prunetraceback(self, excinfo):
if hasattr(self, 'fspath'):
traceback = excinfo.traceback
ntraceback = traceback.cut(path=self.fspath)
if ntraceback == traceback:
ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
excinfo.traceback = ntraceback.filter()
class FSCollector(Collector):
def __init__(self, fspath, parent=None, config=None, session=None):
fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
name = fspath.basename
if parent is not None:
rel = fspath.relto(parent.fspath)
if rel:
name = rel
name = name.replace(os.sep, "/")
super(FSCollector, self).__init__(name, parent, config, session)
self.fspath = fspath
def _makeid(self):
relpath = self.fspath.relto(self.config.rootdir)
if os.sep != "/":
relpath = relpath.replace(os.sep, "/")
return relpath
class File(FSCollector):
""" base class for collecting tests from a file. """
class Item(Node):
""" a basic test invocation item. Note that for a single function
there might be multiple test invocation items.
"""
nextitem = None
def __init__(self, name, parent=None, config=None, session=None):
super(Item, self).__init__(name, parent, config, session)
self._report_sections = []
def add_report_section(self, when, key, content):
if content:
self._report_sections.append((when, key, content))
def reportinfo(self):
return self.fspath, None, ""
@property
def location(self):
try:
return self._location
except AttributeError:
location = self.reportinfo()
# bestrelpath is a quite slow function
cache = self.config.__dict__.setdefault("_bestrelpathcache", {})
try:
fspath = cache[location[0]]
except KeyError:
fspath = self.session.fspath.bestrelpath(location[0])
cache[location[0]] = fspath
location = (fspath, location[1], str(location[2]))
self._location = location
return location
class NoMatch(Exception):
""" raised if matching cannot locate a matching names. """
class Session(FSCollector):
class Interrupted(KeyboardInterrupt):
""" signals an interrupted test run. """
__module__ = 'builtins' # for py3
def __init__(self, config):
FSCollector.__init__(self, config.rootdir, parent=None,
config=config, session=self)
self.config.pluginmanager.register(self, name="session", prepend=True)
self._testsfailed = 0
self.shouldstop = False
self.trace = config.trace.root.get("collection")
self._norecursepatterns = config.getini("norecursedirs")
self.startdir = py.path.local()
self._fs2hookproxy = {}
def _makeid(self):
return ""
def pytest_collectstart(self):
if self.shouldstop:
raise self.Interrupted(self.shouldstop)
def pytest_runtest_logreport(self, report):
if report.failed and not hasattr(report, 'wasxfail'):
self._testsfailed += 1
maxfail = self.config.getvalue("maxfail")
if maxfail and self._testsfailed >= maxfail:
self.shouldstop = "stopping after %d failures" % (
self._testsfailed)
pytest_collectreport = pytest_runtest_logreport
def isinitpath(self, path):
return path in self._initialpaths
def gethookproxy(self, fspath):
try:
return self._fs2hookproxy[fspath]
except KeyError:
self._fs2hookproxy[fspath] = x = FSHookProxy(fspath, self.config)
return x
def perform_collect(self, args=None, genitems=True):
hook = self.config.hook
try:
items = self._perform_collect(args, genitems)
hook.pytest_collection_modifyitems(session=self,
config=self.config, items=items)
finally:
hook.pytest_collection_finish(session=self)
return items
def _perform_collect(self, args, genitems):
if args is None:
args = self.config.args
self.trace("perform_collect", self, args)
self.trace.root.indent += 1
self._notfound = []
self._initialpaths = set()
self._initialparts = []
self.items = items = []
for arg in args:
parts = self._parsearg(arg)
self._initialparts.append(parts)
self._initialpaths.add(parts[0])
rep = collect_one_node(self)
self.ihook.pytest_collectreport(report=rep)
self.trace.root.indent -= 1
if self._notfound:
errors = []
for arg, exc in self._notfound:
line = "(no name %r in any of %r)" % (arg, exc.args[0])
errors.append("not found: %s\n%s" % (arg, line))
#XXX: test this
raise pytest.UsageError(*errors)
if not genitems:
return rep.result
else:
if rep.passed:
for node in rep.result:
self.items.extend(self.genitems(node))
return items
def collect(self):
for parts in self._initialparts:
arg = "::".join(map(str, parts))
self.trace("processing argument", arg)
self.trace.root.indent += 1
try:
for x in self._collect(arg):
yield x
except NoMatch:
# we are inside a make_report hook so
# we cannot directly pass through the exception
self._notfound.append((arg, sys.exc_info()[1]))
self.trace.root.indent -= 1
def _collect(self, arg):
names = self._parsearg(arg)
path = names.pop(0)
if path.check(dir=1):
assert not names, "invalid arg %r" %(arg,)
for path in path.visit(fil=lambda x: x.check(file=1),
rec=self._recurse, bf=True, sort=True):
for x in self._collectfile(path):
yield x
else:
assert path.check(file=1)
for x in self.matchnodes(self._collectfile(path), names):
yield x
def _collectfile(self, path):
ihook = self.gethookproxy(path)
if not self.isinitpath(path):
if ihook.pytest_ignore_collect(path=path, config=self.config):
return ()
return ihook.pytest_collect_file(path=path, parent=self)
def _recurse(self, path):
ihook = self.gethookproxy(path.dirpath())
if ihook.pytest_ignore_collect(path=path, config=self.config):
return
for pat in self._norecursepatterns:
if path.check(fnmatch=pat):
return False
ihook = self.gethookproxy(path)
ihook.pytest_collect_directory(path=path, parent=self)
return True
def _tryconvertpyarg(self, x):
mod = None
path = [os.path.abspath('.')] + sys.path
for name in x.split('.'):
# ignore anything that's not a proper name here
# else something like --pyargs will mess up '.'
# since imp.find_module will actually sometimes work for it
# but it's supposed to be considered a filesystem path
# not a package
if name_re.match(name) is None:
return x
try:
fd, mod, type_ = imp.find_module(name, path)
except ImportError:
return x
else:
if fd is not None:
fd.close()
if type_[2] != imp.PKG_DIRECTORY:
path = [os.path.dirname(mod)]
else:
path = [mod]
return mod
def _parsearg(self, arg):
""" return (fspath, names) tuple after checking the file exists. """
arg = str(arg)
if self.config.option.pyargs:
arg = self._tryconvertpyarg(arg)
parts = str(arg).split("::")
relpath = parts[0].replace("/", os.sep)
path = self.config.invocation_dir.join(relpath, abs=True)
if not path.check():
if self.config.option.pyargs:
msg = "file or package not found: "
else:
msg = "file not found: "
raise pytest.UsageError(msg + arg)
parts[0] = path
return parts
def matchnodes(self, matching, names):
self.trace("matchnodes", matching, names)
self.trace.root.indent += 1
nodes = self._matchnodes(matching, names)
num = len(nodes)
self.trace("matchnodes finished -> ", num, "nodes")
self.trace.root.indent -= 1
if num == 0:
raise NoMatch(matching, names[:1])
return nodes
def _matchnodes(self, matching, names):
if not matching or not names:
return matching
name = names[0]
assert name
nextnames = names[1:]
resultnodes = []
for node in matching:
if isinstance(node, pytest.Item):
if not names:
resultnodes.append(node)
continue
assert isinstance(node, pytest.Collector)
rep = collect_one_node(node)
if rep.passed:
has_matched = False
for x in rep.result:
if x.name == name:
resultnodes.extend(self.matchnodes([x], nextnames))
has_matched = True
# XXX accept IDs that don't have "()" for class instances
if not has_matched and len(rep.result) == 1 and x.name == "()":
nextnames.insert(0, name)
resultnodes.extend(self.matchnodes([x], nextnames))
node.ihook.pytest_collectreport(report=rep)
return resultnodes
def genitems(self, node):
self.trace("genitems", node)
if isinstance(node, pytest.Item):
node.ihook.pytest_itemcollected(item=node)
yield node
else:
assert isinstance(node, pytest.Collector)
rep = collect_one_node(node)
if rep.passed:
for subnode in rep.result:
for x in self.genitems(subnode):
yield x
node.ihook.pytest_collectreport(report=rep)
|
import numpy as np
from PyQt5.QtCore import (QAbstractTableModel, QModelIndex, QObject, Qt,
QVariant, pyqtProperty, pyqtSignal, pyqtSlot)
from ..hub import Hub, Message
class PlotDataModel(QAbstractTableModel):
# DataRole = Qt.UserRole + 1
def __init__(self, *args, **kwargs):
super(PlotDataModel, self).__init__(*args, **kwargs)
self._data = list(zip(np.arange(100), np.random.sample(100)))
# The data model needs to listen for add data events
self._hub = Hub()
# self._hub.subscribe(AddDataMessage, self.add_data, self)
# self._hub.subscribe(AddPlotDataMessage, self.add_data, self)
# def roleNames(self):
# return {
# self.DataRole: b'data'
# }
def rowCount(self, parent=None, *args, **kwargs):
return len(self._data)
def columnCount(self, parent=None, *args, **kwargs):
return 2
    def data(self, index, role=None):
        # if role == self.DataRole:
        #     return self._data[index.row()]
        if role == Qt.DisplayRole:
            return self._data[index.row()][index.column()]
        elif role == Qt.EditRole:
            return self._data[index.row()][index.column()]
        return QVariant()
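# Hedged usage sketch (illustrative only): fetch one cell through the Qt model API.
# Creating a QObject-based model outside a running Qt application is assumed to be
# acceptable here, as in unit tests.
def _plot_data_model_demo():
    model = PlotDataModel()
    index = model.index(0, 1)  # row 0, column 1 (the random sample value)
    return model.data(index, Qt.DisplayRole)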
|
import pretty_midi
import glob
import os
import copy
from collections import Counter
from multiprocessing.dummy import Pool as ThreadPool
from tqdm import tqdm
# Import shared files
import sys
sys.path.append('..')
from Shared_Files.Global_Util import *
from Shared_Files.Constants import *
import warnings
warnings.filterwarnings("ignore")
class MidiPreProcessor:
"""
    Reads across multiple note datasets, storing metadata on each
    set and its associated files for note analysis and model training.
"""
def __init__(self, path_to_full_data_set,
genre_sub_sample_set=sys.maxsize,
generate_validation=False):
"""
:param path_to_full_data_set:
Pass in a string to the path of directory holding all dataset(s)
:param genre_sub_sample_set:
Parses each genre into a subset based on the passed integer value.
:param generate_validation:
Boolean to mark files to be used as validation
"""
# Progress-bar for threading-pool
self.__pbar = None
# ---
self.__all_possible_instr_note_pairs = set()
self.__all_possible_instr_note_pairs_counter = Counter()
self.__instr_note_pairs_dict = dict()
self.__all_instruments = set()
# Files to ignore for when splicing Notes into train/test
self.__blacklisted_files_validation = set()
# Stores all genres to another dict that stores
# the corresponding file note size
self.__genre_file_dict = dict()
self.__genre_instr_note_counters = dict()
# Stores all corrupted files found
self.__corrupted_files_paths = []
        # Store files that are too small (determined by the input sequence)
self.__small_files_paths = []
# Init encoders and decoders
self.__master_instr_note_encoder = dict()
self.__master_instr_note_decoder = dict()
self.__master_instr_encoder = dict()
self.__master_instr_decoder = dict()
self.__master_genre_encoder = dict()
self.__master_genre_decoder = dict()
# ---------------------------------
# Numeric counts
self.__total_file_count = 0
self.__total_intr_note_pair_size = 0
# Thread pool out reading multiple files of each dataset
thread_pool_results = self.__thread_pool_datasets_reader(
self.__genre_dataset_init, path_to_full_data_set, genre_sub_sample_set)
# Init all Notes based on thread pool results
for genre_count, genre_dataset_result in enumerate(thread_pool_results):
# Add to set of all instr/note pairs
self.__all_possible_instr_note_pairs |= genre_dataset_result["genre_instr_note_pairs"]
# Add to set of all instruments
self.__all_instruments |= genre_dataset_result["genre_instruments"]
# Numeric value of non-unique total instr/note pairs
self.__total_intr_note_pair_size += genre_dataset_result[
"genre_size"]
# Store files based on the genre of songs
self.__genre_file_dict = {**self.__genre_file_dict,
**genre_dataset_result["genre_file_meta_data"]}
# Store counter object based on genre
self.__genre_instr_note_counters[genre_dataset_result[
"genre_name"]] = genre_dataset_result["genre_instr_note_pairs_counter"]
# Counter object of all possible instr/note
self.__all_possible_instr_note_pairs_counter += genre_dataset_result["genre_instr_note_pairs_counter"]
# ---
self.__corrupted_files_paths += genre_dataset_result[
"corrupted_files"]
self.__small_files_paths += genre_dataset_result["small_files"]
# Sort all Notes before encoding for my own sanity
self.__all_possible_instr_note_pairs = sorted(
self.__all_possible_instr_note_pairs)
self.__all_instruments = sorted(self.__all_instruments)
self.__instr_note_pairs_dict = {instr:[instr_note_pair
for instr_note_pair in self.__all_possible_instr_note_pairs
if instr_note_pair.find(instr) != -1]
for instr in self.__all_instruments}
# Begin creating label encoders and decoders
# -----
for label, (genre, _) in enumerate(
self.__genre_instr_note_counters.items()):
self.__master_genre_encoder[genre] = label + 1
self.__master_genre_decoder = {v: k for k, v
in self.__master_genre_encoder.items()}
# -----
for label, instr_note_pair in enumerate(
self.__all_possible_instr_note_pairs):
self.__master_instr_note_encoder[instr_note_pair] = label + 1
self.__master_instr_note_decoder = {v: k for k, v
in
self.__master_instr_note_encoder.items()}
# -----
for label, instr in enumerate(
self.__all_instruments):
self.__master_instr_encoder[instr] = label + 1
self.__master_instr_decoder = {v: k for k, v
in self.__master_instr_encoder.items()}
# -------------------------------------
# Corrupted files were found.
if self.__corrupted_files_paths:
print("The Pre Processor found {0} corrupted files".format(len(self.__corrupted_files_paths)))
print("Displaying all corrupted songs:\n")
for song in self.__corrupted_files_paths:
print("\t", song.split("/", 6)[-1])
print()
display_options_menu(menu_intro="Corrupted files found!\n"
"\tIt's fine if you don't delete"
" them.Just know the pre-processor"
" will not use them at all.",
menu_options={1: "Delete all corrupted files",
2: "Ignore"})
user_input = input("\nInput: ")
# Remove corrupted files
if user_input == "1":
self.delete_corrupted_files()
else:
pass
# ---------------------------------------------
# Small files were found.
if self.__small_files_paths:
print("The Pre Processor found {0} files that"
" are smaller or equal to than {1} Classical_Notes".format(
len(self.__small_files_paths),
MIDI_CONSTANTS.SMALL_FILE_CHECK))
print("Displaying all small songs:\n")
for song in self.__small_files_paths:
print("\t", song.split("/", 6)[-1])
print()
display_options_menu(menu_intro="Small files found!\n"
"\tIt's fine if you don't delete"
" them.Just know the pre-processor"
" will not use them at all.",
menu_options={1: "Delete all small files",
2: "Ignore"})
user_input = input("\nInput: ")
# Remove small files
if user_input == "1":
self.delete_small_files()
else:
pass
# ---------------------------------------------
if generate_validation:
# Marks files to be selected for validation
self.__generate_validation_files()
def __thread_pool_datasets_reader(self, func,
path_to_full_data_set,
genre_sub_sample_set):
"""
Thread pools out the dataset by genre
"""
# Get all folder paths for each genre
all_train_datasets_paths = [x[0] for x in os.walk(
path_to_full_data_set)]
all_train_datasets_paths.pop(0)
all_files_by_genre = []
for dataset_pth in all_train_datasets_paths:
dataset_files = [dataset_pth + "/" + file for file in
glob.glob1(dataset_pth, "*.mid")][:genre_sub_sample_set]
# Ensures files were actually extracted
if len(dataset_files):
self.__total_file_count += len(dataset_files)
all_files_by_genre.append(dataset_files)
# Init progress bar
self.__pbar = tqdm(total=self.__total_file_count)
# Begin threaded pool
pool = ThreadPool(HARDWARE_RELATED_CONSTANTS.THREAD_POOL_AMOUNT)
all_results = pool.imap_unordered(func,
all_files_by_genre)
# End threaded pool
pool.close()
pool.join()
self.__pbar.close()
self.__pbar = None
return all_results
def __genre_dataset_init(self, genre_train_files):
"""
Init full dataset attributes on MidiPreProcessor init
"""
# Store meta Notes on file and genre specific Notes
genre_instr_note_pairs = set()
genre_instr_note_pairs_counter = Counter()
genre_instruments = set()
genre_file_meta_data = dict()
genre_size = 0
# Store invalid file paths
corrupted_files = []
small_files = []
genre_name = genre_train_files[0].split("/")[-2].replace('_Midi', '')
for _, file in enumerate(genre_train_files):
# Update thread pool progress bar
self.__pbar.update(1)
self.__pbar.set_postfix_str(s=file.split("/", -1)[-1][:20],
refresh=True)
# Meta Notes on the file
midi_file_attr = self.read_midi_file(file)
# Check if flags were raised
if midi_file_attr["corrupted"]:
corrupted_files.append(file)
elif midi_file_attr["small_file_check"]:
small_files.append(file)
# File passed requirements; store meta Notes on genre and file
else:
genre_instruments |= midi_file_attr["instruments"]
genre_instr_note_pairs |= set(
midi_file_attr["flat_instr_note_seq"])
genre_size += midi_file_attr["flat_instr_note_seq_len"]
genre_file_meta_data[file] = {"flat_instr_note_seq":
midi_file_attr[
"flat_instr_note_seq"],
"flat_instr_note_seq_len":
midi_file_attr[
"flat_instr_note_seq_len"],
"instruments":
midi_file_attr[
"instruments"],}
genre_instr_note_pairs_counter += Counter(midi_file_attr["flat_instr_note_seq"])
return {"genre_name": genre_name,
"genre_size": genre_size,
"genre_instruments": genre_instruments,
"genre_instr_note_pairs": genre_instr_note_pairs,
"genre_instr_note_pairs_counter": genre_instr_note_pairs_counter,
"genre_file_meta_data": {genre_name: genre_file_meta_data},
"corrupted_files": corrupted_files,
"small_files": small_files,}
def __generate_validation_files(self):
"""
Mark files for the validation set
"""
self.__blacklisted_files_validation = set()
# Find files for best fit the for the validation set per genre
for genre_name, instr_note_counter in self.__genre_instr_note_counters.items():
genre_note_count = sum(instr_note_counter.values())
needed_validation_note_count = int(
(genre_note_count / self.__total_intr_note_pair_size) \
* genre_note_count)
note_count_file_dict = {file_meta_data["flat_instr_note_seq_len"]: file_name
for file_name, file_meta_data
in self.__genre_file_dict[
genre_name].items()}
note_count_file_list = list(note_count_file_dict.keys())
'''
The validation count is decreasing per file note count;
When it reaches this arbitrary threshold the validation
set for this particular genre has been reached
'''
while True and needed_validation_note_count > 25:
closest_file_note_count = find_nearest(
numbers=note_count_file_list,
target=needed_validation_note_count)
needed_validation_note_count -= closest_file_note_count
self.__blacklisted_files_validation.add(
note_count_file_dict[closest_file_note_count])
note_count_file_list.remove(closest_file_note_count)
def read_midi_file(self, file):
"""
        Extract the instruments/notes of the midi file.
"""
# Attempt to parse midi file
try:
midi_data = pretty_midi.PrettyMIDI(file)
# Midi file couldn't be opened; Raise flag; return dummy dict
except:
return {"flat_instr_note_seq": [],
"flat_instr_note_seq_len": 0,
"instruments": {},
"small_file_check": False,
"corrupted": True}
# Stores instrument note pair
flat_instr_note_seq = []
file_instruments = set()
        # Walk through the midi file; store the instrument/note relationship
        # as strings
for instr in midi_data.instruments:
for note_obj in instr.notes:
program_instr_str = "Program" + PARAMETER_VAL_SPLITTER.STR + str(instr.program)\
+ INSTRUMENT_NOTE_SPLITTER.STR\
+ "Is_Drum" + PARAMETER_VAL_SPLITTER.STR + str(instr.is_drum)
file_instruments.add(program_instr_str)
flat_instr_note_seq.append(
(program_instr_str + INSTRUMENT_NOTE_SPLITTER.STR + "Note" + PARAMETER_VAL_SPLITTER.STR
+ pretty_midi.note_number_to_name(note_obj.pitch),
note_obj))
# ---
flat_instr_note_seq_len = len(flat_instr_note_seq)
        # File is too small for our neural networks to use; raise flag
if flat_instr_note_seq_len <= MIDI_CONSTANTS.SMALL_FILE_CHECK:
return {"flat_instr_note_seq": flat_instr_note_seq,
"flat_instr_note_seq_len": flat_instr_note_seq_len,
"instruments": file_instruments,
"small_file_check": True,
"corrupted": False}
        # Sort notes in proper sequence based on their start and end points
flat_instr_note_seq.sort(key=lambda tup: (tup[1].start, tup[1].end))
flat_instr_note_seq = [instr_note[0] for instr_note in
flat_instr_note_seq]
        # Return dict for a more explicit multi-value return type
return {"flat_instr_note_seq": flat_instr_note_seq,
"flat_instr_note_seq_len": flat_instr_note_seq_len,
"instruments": file_instruments,
"small_file_check": False,
"corrupted": False}
# Delete the unused files from personal directory
def delete_corrupted_files(self):
for song in self.__corrupted_files_paths:
os.remove(song)
self.__corrupted_files_paths = []
def delete_small_files(self):
for song in self.__small_files_paths:
os.remove(song)
self.__small_files_paths = []
# --------------- Setters ---------------
def re_init_validation(self, new_file_list):
self.__blacklisted_files_validation = new_file_list
# --------------- Getters ---------------
def return_all_possible_instr_note_pairs(self):
return copy.deepcopy(self.__all_possible_instr_note_pairs)
def return_genre_instr_note_counters(self):
return copy.deepcopy(self.__genre_instr_note_counters)
def return_all_possible_instr_note_pairs_counter(self):
return copy.deepcopy(self.__all_possible_instr_note_pairs_counter)
# ----
def return_all_instruments(self):
return copy.deepcopy(self.__all_instruments)
def return_instr_note_pairs_dict(self):
return copy.deepcopy(self.__instr_note_pairs_dict)
# ----
def return_blacklisted_files_validation(self):
return copy.deepcopy(self.__blacklisted_files_validation)
def return_genre_file_dict(self):
return copy.deepcopy(self.__genre_file_dict)
# ----
def return_corrupted_files_paths(self):
return copy.deepcopy(self.__corrupted_files_paths)
def return_small_files_paths(self):
return copy.deepcopy(self.__small_files_paths)
# ----
def return_master_instr_note_encoder(self):
return copy.deepcopy(self.__master_instr_note_encoder)
def return_master_instr_note_decoder(self):
return copy.deepcopy(self.__master_instr_note_decoder)
# ----
def return_master_instr_encoder(self):
return copy.deepcopy(self.__master_instr_encoder)
def return_master_instr_decoder(self):
return copy.deepcopy(self.__master_instr_decoder)
# ----
def return_master_genre_encoder(self):
return copy.deepcopy(self.__master_genre_encoder)
def return_master_genre_decoder(self):
return copy.deepcopy(self.__master_genre_decoder)
# --------------- Basic Functionality ---------------
def encode_instr_note(self, instr_note_str):
return self.__master_instr_note_encoder[instr_note_str]
def encode_instr_note_seq(self, instr_note_seq):
return [self.__master_instr_note_encoder[instr_note_pair]
for instr_note_pair in instr_note_seq]
# ----
def decode_instr_note(self, instr_note_num):
return self.__master_instr_note_decoder[instr_note_num]
def decode_instr_note_seq(self, instr_note_seq):
return [self.__master_instr_note_decoder[instr_note_pair]
for instr_note_pair in instr_note_seq]
# ----
def encode_instr(self, instr_str):
return self.__master_instr_encoder[instr_str]
def decode_instr(self, instr_num):
return self.__master_instr_decoder[instr_num]
# ----
def encode_genre(self, genre_str):
return self.__master_genre_encoder[genre_str]
def decode_genre(self, genre_num):
return self.__master_genre_decoder[genre_num]
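# Hedged usage sketch (illustrative only; the dataset path and subsample size below
# are hypothetical):
def _midi_pre_processor_demo():
    processor = MidiPreProcessor("Datasets/All_Genres",
                                 genre_sub_sample_set=50,
                                 generate_validation=True)
    encoder = processor.return_master_instr_note_encoder()
    print("Distinct instrument/note pairs:", len(encoder))
    return processor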
|
# Copyright 2019 Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cyborg import objects
from cyborg.tests.unit.db import base
from cyborg.tests.unit.db import utils
class TestDeviceObject(base.DbTestCase):
def setUp(self):
super(TestDeviceObject, self).setUp()
self.fake_device = utils.get_test_device()
def test_get(self):
uuid = self.fake_device['uuid']
with mock.patch.object(self.dbapi, 'device_get',
autospec=True) as mock_device_get:
mock_device_get.return_value = self.fake_device
device = objects.Device.get(self.context, uuid)
mock_device_get.assert_called_once_with(self.context, uuid)
self.assertEqual(self.context, device._context)
def test_list(self):
with mock.patch.object(self.dbapi, 'device_list',
autospec=True) as mock_device_list:
mock_device_list.return_value = [self.fake_device]
devices = objects.Device.list(self.context)
self.assertEqual(1, mock_device_list.call_count)
self.assertEqual(1, len(devices))
self.assertIsInstance(devices[0], objects.Device)
self.assertEqual(self.context, devices[0]._context)
def test_create(self):
with mock.patch.object(self.dbapi, 'device_create',
autospec=True) as mock_device_create:
mock_device_create.return_value = self.fake_device
device = objects.Device(self.context,
**self.fake_device)
device.create(self.context)
mock_device_create.assert_called_once_with(
self.context, self.fake_device)
self.assertEqual(self.context, device._context)
def test_destroy(self):
uuid = self.fake_device['uuid']
with mock.patch.object(self.dbapi, 'device_get',
autospec=True) as mock_device_get:
mock_device_get.return_value = self.fake_device
with mock.patch.object(self.dbapi, 'device_delete',
autospec=True) as mock_device_delete:
device = objects.Device.get(self.context, uuid)
device.destroy(self.context)
mock_device_delete.assert_called_once_with(self.context,
uuid)
self.assertEqual(self.context, device._context)
def test_update(self):
uuid = self.fake_device['uuid']
with mock.patch.object(self.dbapi, 'device_get',
autospec=True) as mock_device_get:
mock_device_get.return_value = self.fake_device
with mock.patch.object(self.dbapi, 'device_update',
autospec=True) as mock_device_update:
fake = self.fake_device
fake["vendor_board_info"] = "new_vendor_board_info"
mock_device_update.return_value = fake
device = objects.Device.get(self.context, uuid)
device.vendor_board_info = 'new_vendor_board_info'
device.save(self.context)
mock_device_get.assert_called_once_with(self.context,
uuid)
mock_device_update.assert_called_once_with(
self.context, uuid,
{'vendor_board_info': 'new_vendor_board_info'})
self.assertEqual(self.context, device._context)
|
from utils import *
from random import *
formattedProxies = []
def chooseProxy(tasknum):
if tasknum + 1 <= len(proxieslines):
proxy = proxieslines[tasknum].rstrip()
if tasknum + 1 > len(proxieslines):
if len(proxieslines) > 1:
a = randint(1, len(proxieslines) - 1)
if len(proxieslines) == 1:
a = 0
proxy = proxieslines[a].rstrip()
try:
proxytest = proxy.split(":")[2]
userpass = True
except IndexError:
userpass = False
if userpass == False:
proxyedit = proxy
if userpass == True:
ip = proxy.split(":")[0]
port = proxy.split(":")[1]
userpassproxy = ip + ':' + port
proxyedit = userpassproxy
proxyuser = proxy.split(":")[2]
proxyuser = proxyuser.rstrip()
proxypass = proxy.split(":")[3]
        proxypass = proxypass.rstrip()
if userpass == True:
proxies = {'http': 'http://' + proxyuser + ':' + proxypass + '@' + userpassproxy,
'https': 'https://' + proxyuser + ':' + proxypass + '@' + userpassproxy}
if userpass == False:
proxies = {'http': 'http://' + proxy,
'https': 'https://' + proxy}
global formattedProxies
formattedProxies.append(proxies)
return proxies
def importProxies(proxyfile):
p = open('{}.txt'.format(proxyfile))
global proxieslines
proxieslines = p.readlines()
numproxies = len(proxieslines)
global formattedProxies
if numproxies > 0:
formattedProxies = []
for i in range (0,len(proxieslines)):
chooseProxy(i)
if numproxies == 0:
formattedProxies = [None]
# print(formattedProxies[0])
xlpFormat() #IMPORTANT DO NOT REMOVE OR ELSE SCRIPT WILL BREAK.
log('%s proxies loaded' % numproxies)
return formattedProxies
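# --- Hedged usage sketch (added; not part of the original script) ---
# chooseProxy() builds dictionaries in the format the `requests` library expects for
# its `proxies=` argument, e.g. {'http': 'http://user:pass@host:port', 'https': ...}.
# A caller could therefore do something like the following (assumes a local
# proxies.txt and the `requests` package; the URL is only illustrative):
#
#   import requests
#
#   all_proxies = importProxies('proxies')       # reads proxies.txt
#   task_proxy = all_proxies[0]                  # None when the file is empty
#   response = requests.get('https://example.com', proxies=task_proxy, timeout=10)
#   print(response.status_code)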
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .subtitles import SubtitlesInfoExtractor
from ..compat import (
compat_urllib_request,
compat_urllib_parse,
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
float_or_none,
)
class CeskaTelevizeIE(SubtitlesInfoExtractor):
_VALID_URL = r'https?://www\.ceskatelevize\.cz/(porady|ivysilani)/(.+/)?(?P<id>[^?#]+)'
_TESTS = [
{
'url': 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220',
'info_dict': {
'id': '214411058091220',
'ext': 'mp4',
'title': 'Hyde Park Civilizace',
'description': 'Věda a současná civilizace. Interaktivní pořad - prostor pro vaše otázky a komentáře',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 3350,
},
'params': {
# m3u8 download
'skip_download': True,
},
},
{
'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/bonus/14716-zpevacka-z-duparny-bobina',
'info_dict': {
'id': '14716',
'ext': 'mp4',
'title': 'První republika: Zpěvačka z Dupárny Bobina',
'description': 'Sága mapující atmosféru první republiky od r. 1918 do r. 1945.',
'thumbnail': 're:^https?://.*\.jpg',
'duration': 88.4,
},
'params': {
# m3u8 download
'skip_download': True,
},
},
]
def _real_extract(self, url):
url = url.replace('/porady/', '/ivysilani/').replace('/video/', '')
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
NOT_AVAILABLE_STRING = 'This content is not available at your territory due to limited copyright.'
if '%s</p>' % NOT_AVAILABLE_STRING in webpage:
raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)
typ = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":"(.+?)","id":".+?"\}\],', webpage, 'type')
episode_id = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":".+?","id":"(.+?)"\}\],', webpage, 'episode_id')
data = {
'playlist[0][type]': typ,
'playlist[0][id]': episode_id,
'requestUrl': compat_urllib_parse_urlparse(url).path,
'requestSource': 'iVysilani',
}
req = compat_urllib_request.Request(
'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist',
data=compat_urllib_parse.urlencode(data))
req.add_header('Content-type', 'application/x-www-form-urlencoded')
req.add_header('x-addr', '127.0.0.1')
req.add_header('X-Requested-With', 'XMLHttpRequest')
req.add_header('Referer', url)
playlistpage = self._download_json(req, video_id)
playlist_url = playlistpage['url']
if playlist_url == 'error_region':
raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)
req = compat_urllib_request.Request(compat_urllib_parse.unquote(playlist_url))
req.add_header('Referer', url)
playlist = self._download_json(req, video_id)
item = playlist['playlist'][0]
formats = []
for format_id, stream_url in item['streamUrls'].items():
formats.extend(self._extract_m3u8_formats(stream_url, video_id, 'mp4'))
self._sort_formats(formats)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
duration = float_or_none(item.get('duration'))
thumbnail = item.get('previewImageUrl')
subtitles = {}
subs = item.get('subtitles')
if subs:
subtitles['cs'] = subs[0]['url']
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, subtitles)
return
subtitles = self._fix_subtitles(self.extract_subtitles(video_id, subtitles))
return {
'id': episode_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
@staticmethod
def _fix_subtitles(subtitles):
""" Convert millisecond-based subtitles to SRT """
if subtitles is None:
return subtitles # subtitles not requested
def _msectotimecode(msec):
""" Helper utility to convert milliseconds to timecode """
components = []
for divider in [1000, 60, 60, 100]:
components.append(msec % divider)
msec //= divider
return "{3:02}:{2:02}:{1:02},{0:03}".format(*components)
def _fix_subtitle(subtitle):
for line in subtitle.splitlines():
m = re.match(r"^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$", line)
if m:
yield m.group(1)
start, stop = (_msectotimecode(int(t)) for t in m.groups()[1:])
yield "{0} --> {1}".format(start, stop)
else:
yield line
fixed_subtitles = {}
for k, v in subtitles.items():
fixed_subtitles[k] = "\r\n".join(_fix_subtitle(v))
return fixed_subtitles
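# --- Hedged illustration (added; not part of the extractor) ---
# _fix_subtitles() rewrites the site's "index; start_ms stop_ms" cue headers into SRT
# "HH:MM:SS,mmm --> HH:MM:SS,mmm" lines and leaves the text lines untouched. For
# example, a cue header such as "1; 3500 7250" becomes:
#
#     1
#     00:00:03,500 --> 00:00:07,250
#
# (the sample timings are made up; only the conversion rule comes from the code above).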
|
from collections.abc import Iterable, Mapping
from typing import Any
from profile_generator.feature.colors.white_balance.schema import DEFAULT
from profile_generator.model.view import raw_therapee
from profile_generator.model.view.raw_therapee import EqPoint, LinearEqPoint
from profile_generator.schema import object_of, range_of
_LC_ENABLED = "LCEnabled"
_HH_CURVE = "HhCurve"
_CH_CURVE = "ChCurve"
_LH_CURVE = "LhCurve"
DEFAULT = {
_LC_ENABLED: "false",
_HH_CURVE: raw_therapee.CurveType.LINEAR,
_CH_CURVE: raw_therapee.CurveType.LINEAR,
_LH_CURVE: raw_therapee.CurveType.LINEAR,
}
_STEPS = 7.0
_COLORS_SCHEMA = object_of(
{
"magenta": range_of(-_STEPS, _STEPS),
"red": range_of(-_STEPS, _STEPS),
"yellow": range_of(-_STEPS, _STEPS),
"green": range_of(-_STEPS, _STEPS),
"cyan": range_of(-_STEPS, _STEPS),
"blue": range_of(-_STEPS, _STEPS),
}
)
SCHEMA = object_of(
{"hue": _COLORS_SCHEMA, "saturation": _COLORS_SCHEMA, "luminance": _COLORS_SCHEMA}
)
_BASE_VALUE = 0.5
_COLORS = [
"red",
"yellow",
"green",
"cyan",
"blue",
"magenta",
]
HUES = {
"red": 0 / 360,
"yellow": 60 / 360,
"green": 120 / 360,
"cyan": 180 / 360,
"blue": 240 / 360,
"magenta": 300 / 360,
}
def process(data: Any) -> Mapping[str, str]:
result: dict[str, str] = {}
result |= _get_eq_curve(data, "hue", 0.25, _HH_CURVE)
result |= _get_eq_curve(data, "saturation", 0.3, _CH_CURVE)
result |= _get_eq_curve(data, "luminance", 0.07, _LH_CURVE)
return DEFAULT | result
def _get_eq_curve(
data: Any, key_name: str, max_adjustment: float, template_name: str
) -> Mapping[str, str]:
config = data.get(key_name, {})
equalizer = _get_equalizer(config, max_adjustment)
if any(p.y != _BASE_VALUE for p in equalizer):
return {
_LC_ENABLED: "true",
template_name: raw_therapee.CurveType.STANDARD
+ raw_therapee.present_equalizer(equalizer),
}
else:
return {}
def _get_equalizer(
config: Mapping[str, int], max_adjustment: float
) -> Iterable[EqPoint]:
return [
LinearEqPoint(HUES[color], _get_value(config, color, max_adjustment))
for color in _COLORS
]
def _get_value(config: Mapping[str, int], color: str, max_adjustment: float) -> float:
adjustment = config.get(color, 0)
return _BASE_VALUE + adjustment / _STEPS * max_adjustment
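# --- Hedged worked example (added for illustration) ---
# _get_value() maps a -7..+7 slider value onto an equalizer ordinate around the 0.5
# baseline. With the "hue" curve's max_adjustment of 0.25, a red adjustment of +3
# yields 0.5 + 3 / 7 * 0.25 ≈ 0.607, and a missing color stays at the 0.5 baseline.
if __name__ == "__main__":
    assert abs(_get_value({"red": 3}, "red", 0.25) - (0.5 + 3 / 7 * 0.25)) < 1e-9
    assert _get_value({}, "red", 0.25) == 0.5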
|
"""
Author: Shreck Ye
Date: June 16, 2019
Time complexity: O(log(N))
Let's think about this mathematically. The recurrence is a linear relationship.
Viewing it as a recurrence on the single vector F_n = (f_n, f_{n + 1})' with transition matrix M = (0, 1; 1, 1),
that is, (f_{n + 1}, f_{n + 2})' = (0, 1; 1, 1) (f_n, f_{n + 1})', namely F_{n + 1} = M F_n,
we can compute the result with fast matrix exponentiation, reducing the number of recursive steps to O(log(N)).
"""
import copy
F_0 = [[0], [1]]
M = [[0, 1], [1, 1]]
def zero_matrix(m: int, n: int):
rows = [None] * m
row = [0] * n
for i in range(m):
rows[i] = copy.deepcopy(row)
return rows
def matmul(A, B):
# More checks of matrix shapes may be performed
m = len(A)
n = len(B)
l = len(B[0])
C = zero_matrix(m, l)
for i in range(m):
for j in range(l):
            total = 0  # avoid shadowing the built-in sum()
            A_i = A[i]
            for k in range(n):
                total += A_i[k] * B[k][j]
            C[i][j] = total
return C
def eye(size: int):
E = zero_matrix(size, size)
for i in range(size):
E[i][i] = 1
return E
def matrix_power(A, n: int):
size = len(A)
if n == 0:
return eye(size)
elif n == 1:
return copy.deepcopy(A)
else:
A_pow_half_n = matrix_power(A, n // 2)
A_pow_n = matmul(A_pow_half_n, A_pow_half_n)
if n % 2:
A_pow_n = matmul(A_pow_n, A)
return A_pow_n
class Solution:
def fib(self, N: int) -> int:
return matmul(matrix_power(M, N), F_0)[0][0]
# Test cases
s = Solution()
print(s.fib(0), s.fib(1), s.fib(2), s.fib(3), s.fib(4), s.fib(5))
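# --- Hedged sanity check (added; not part of the original solution) ---
# The matrix-power result should agree with the plain iterative recurrence.
def _iterative_fib(n: int) -> int:
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a

assert all(s.fib(n) == _iterative_fib(n) for n in range(20))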
|
stacks_data = [
{
'name': 'Python',
'image': '../assets/images/python.png'
},
{
'name': 'Plotly',
'image': '../assets/images/plotly.png'
},
{
'name': 'Dash',
'image': '../assets/images/dash.png'
},
{
'name': 'Pandas',
'image': '../assets/images/pandas.png'
},
{
'name': 'Keras',
'image': '../assets/images/keras.png'
},
{
'name': 'TensorFlow',
'image': '../assets/images/tensorflow.png'
},
{
'name': 'Sklearn',
'image': '../assets/images/sklearn.png'
}
]
|
# python imports
import numpy as np
from PIL import Image
import torch
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from collections import OrderedDict
from sys import exit
# File containing all of the functions used in the predict program
def load_checkpoint(filepath):
checkpoint = torch.load(filepath)
if checkpoint["arch"] == 'VGG':
model = models.vgg16(pretrained=True)
elif checkpoint["arch"] == 'Densenet':
model = models.densenet121(pretrained=True)
else:
print("Unsupported arch used in checkpoint")
exit(1)
for param in model.parameters():
param.requires_grad = False
model.class_to_idx = checkpoint['class_to_idx']
# Load classifier from checkpoint
classifier = checkpoint['classifier']
model.classifier = classifier
model.load_state_dict(checkpoint['model_state_dict'])
return model
def process_image(image_path):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
# Process a PIL image for use in a PyTorch model
pil_image = Image.open(image_path)
# Resize
if pil_image.size[0] > pil_image.size[1]:
pil_image.thumbnail((5000, 256))
else:
pil_image.thumbnail((256, 5000))
# Crop
left_margin = (pil_image.width-224)/2
bottom_margin = (pil_image.height-224)/2
right_margin = left_margin + 224
top_margin = bottom_margin + 224
pil_image = pil_image.crop((left_margin, bottom_margin, right_margin, top_margin))
# Normalize
np_image = np.array(pil_image)/255
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = (np_image - mean) / std
# PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array
# Color channel needs to be first; retain the order of the other two dimensions.
np_image = np_image.transpose((2, 0, 1))
return np_image
def predict(image_path, model, topk, gpu):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
image = process_image(image_path)
if gpu:
model.to('cuda')
image = torch.from_numpy(image).type(torch.cuda.FloatTensor)
else:
model.to('cpu')
image = torch.from_numpy(image).type(torch.FloatTensor)
# Returns a new tensor with a dimension of size one inserted at the specified position.
image = image.unsqueeze(0)
    output = model(image)  # calling the model invokes its forward pass
probabilities = torch.exp(output)
# Probabilities and the indices of those probabilities corresponding to the classes
top_probabilities, top_indices = probabilities.topk(topk)
# Convert to lists
top_probabilities = top_probabilities.detach().type(torch.FloatTensor).numpy().tolist()[0]
top_indices = top_indices.detach().type(torch.FloatTensor).numpy().tolist()[0]
# Convert topk_indices to the actual class labels using class_to_idx
# Invert the dictionary so you get a mapping from index to class.
idx_to_class = {value: key for key, value in model.class_to_idx.items()}
#print(idx_to_class)
top_classes = [idx_to_class[index] for index in top_indices]
return top_probabilities, top_classes
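# --- Hedged usage sketch (added; the paths below are placeholders) ---
# A predict.py style caller could combine these helpers roughly as follows:
#
#   model = load_checkpoint('checkpoint.pth')
#   probs, classes = predict('flowers/test/1/image_06743.jpg', model, topk=5, gpu=False)
#   for p, c in zip(probs, classes):
#       print('{}: {:.3f}'.format(c, p))
#
# Any checkpoint saved with the 'arch', 'classifier', 'class_to_idx' and
# 'model_state_dict' keys used above should work; the file names are examples only.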
|
import logging
from django.contrib.auth.models import User
from django.db import models, transaction
from django.db.models import Q
from django.utils import timezone
from eve_api.models import Structure, EVEPlayerCharacter, ObjectType
from dataclasses import dataclass
from django.apps import apps
from django.core.cache import cache
logger=logging.getLogger(__name__)
class TransactionLinkage(models.Model):
id = models.BigAutoField(primary_key=True)
source_transaction = models.ForeignKey("PlayerTransaction", related_name="source_transaction", on_delete=models.CASCADE)
destination_transaction = models.ForeignKey("PlayerTransaction", related_name="destination_transaction",on_delete=models.CASCADE)
quantity_linked = models.BigIntegerField()
date_linked = models.DateTimeField(default=timezone.now)
route = models.ForeignKey("TradingRoute", on_delete=models.CASCADE)
class Meta:
index_together = [
["route", "date_linked"]
]
def save(self, *args, **kwargs):
''' On save, update timestamps '''
if not self.id:
self.created = timezone.now()
return super(TransactionLinkage, self).save(*args, **kwargs)
@dataclass
class TransactionSource:
fuzzy: bool
unit_price: float
total_price: float
linkages : list
class PlayerTransaction(models.Model):
ccp_id = models.BigIntegerField(primary_key=True)
character = models.ForeignKey(EVEPlayerCharacter, on_delete=models.CASCADE)
client_id = models.BigIntegerField()
timestamp = models.DateTimeField()
is_buy = models.BooleanField()
is_personal = models.BooleanField()
journal_ref_id = models.BigIntegerField()
location = models.ForeignKey(Structure, on_delete=models.CASCADE)
quantity = models.BigIntegerField()
object_type = models.ForeignKey(ObjectType, on_delete=models.CASCADE)
unit_price = models.FloatField()
# we don't index these because we don't want the entire goddamn index rebuilt every time there's a change made
quantity_without_known_source = models.BigIntegerField()
quantity_without_known_destination = models.BigIntegerField()
def __str__(self):
return "Transaction #{}".format(self.pk)
@staticmethod
def exists(ccp_id):
"""
Cache-backed exists method. Cache only hits for Structures we know exist.
:param ccp_id:
:return:
"""
exists = cache.get("transaction_exists_%s" % ccp_id)
if exists is not None:
return True
else:
exists_db = PlayerTransaction.objects.filter(pk=ccp_id).exists()
if exists_db:
# after 90 days we DGAF
timeout = 86400 * 90
cache.set("transaction_exists_%s" % ccp_id, True, timeout=timeout)
return exists_db
def get_source_value(self, quantity, route):
if quantity > self.quantity:
            raise Exception("something's broken with {}".format(self.pk))
linkages = TransactionLinkage.objects.filter(Q(destination_transaction=self) & Q(route=route))
if not linkages:
return None
ret_links = []
quant_accounted = 0
sum_of_products = 0
for link in linkages:
quant_accounted += link.quantity_linked
sum_of_products += link.quantity_linked * link.source_transaction.unit_price
ret_links.append(link)
        fuzzy = quant_accounted != quantity
unit_price = sum_of_products / quant_accounted
if fuzzy:
sum_of_products = quantity / quant_accounted * sum_of_products
return TransactionSource(fuzzy, unit_price, sum_of_products, ret_links)
class Meta:
index_together = [
["location", "object_type", "character", "timestamp", "is_buy"]
]
def _get_routes_that_apply_to_transaction(self):
TradingRoute_lazy = apps.get_model('market', 'TradingRoute')
routes = TradingRoute_lazy.objects.filter(
destination_character = self.character,
destination_structure = self.location
)
return routes
def link_transactions(self):
# todo: LOCK THE SHIT OUT OF THIS
routes = self._get_routes_that_apply_to_transaction()
older_than = self.timestamp
new_links = []
transactions_to_save = []
attributed = 0
for route in routes:
transactions = PlayerTransaction.objects.filter(
location=route.source_structure,
object_type=self.object_type,
character=route.source_character,
timestamp__lte=older_than,
quantity_without_known_destination__gt=0,
is_buy=True,
).order_by('timestamp')
for source_txn in transactions:
if source_txn.quantity_without_known_destination >= self.quantity_without_known_source:
contribution = self.quantity_without_known_source
else:
contribution = source_txn.quantity_without_known_destination
self.quantity_without_known_source -= contribution
source_txn.quantity_without_known_destination -= contribution
attributed += contribution
link = TransactionLinkage(
source_transaction = source_txn,
destination_transaction = self,
quantity_linked = contribution,
route = route
)
new_links.append(link)
transactions_to_save.append(source_txn)
if not self.quantity_without_known_source:
break
if not self.quantity_without_known_source:
break
if transactions_to_save:
logger.info("Successfully attributed {} units of transaction {}".format(attributed, self.pk))
with transaction.atomic():
self.save()
for t in transactions_to_save:
t.save()
TransactionLinkage.objects.bulk_create(new_links)
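# --- Hedged standalone sketch (added; not part of the Django models above) ---
# link_transactions() walks candidate source transactions oldest-first and links
# quantity to the destination until either side runs out. The same first-in,
# first-out attribution, stripped of the ORM, looks roughly like this:
def _fifo_attribute(needed, sources):
    """sources: list of remaining source quantities (oldest first); returns (index, quantity) links."""
    links = []
    for idx, available in enumerate(sources):
        if needed == 0:
            break
        contribution = min(needed, available)
        if contribution:
            sources[idx] -= contribution
            needed -= contribution
            links.append((idx, contribution))
    return links
# Example: a destination needing 120 units against sources of 50, 50 and 100 units
# would be linked as [(0, 50), (1, 50), (2, 20)].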
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from webob import exc
from nova.api.openstack.compute.plugins.v3 import \
instance_usage_audit_log as ial
from nova import context
from nova import db
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.objects import test_service
from nova import utils
import urllib
service_base = test_service.fake_service
TEST_COMPUTE_SERVICES = [dict(service_base, host='foo', topic='compute'),
dict(service_base, host='bar', topic='compute'),
dict(service_base, host='baz', topic='compute'),
dict(service_base, host='plonk', topic='compute'),
dict(service_base, host='wibble', topic='bogus'),
]
begin1 = datetime.datetime(2012, 7, 4, 6, 0, 0)
begin2 = end1 = datetime.datetime(2012, 7, 5, 6, 0, 0)
begin3 = end2 = datetime.datetime(2012, 7, 6, 6, 0, 0)
end3 = datetime.datetime(2012, 7, 7, 6, 0, 0)
#test data
TEST_LOGS1 = [
#all services done, no errors.
dict(host="plonk", period_beginning=begin1, period_ending=end1,
state="DONE", errors=0, task_items=23, message="test1"),
dict(host="baz", period_beginning=begin1, period_ending=end1,
state="DONE", errors=0, task_items=17, message="test2"),
dict(host="bar", period_beginning=begin1, period_ending=end1,
state="DONE", errors=0, task_items=10, message="test3"),
dict(host="foo", period_beginning=begin1, period_ending=end1,
state="DONE", errors=0, task_items=7, message="test4"),
]
TEST_LOGS2 = [
#some still running...
dict(host="plonk", period_beginning=begin2, period_ending=end2,
state="DONE", errors=0, task_items=23, message="test5"),
dict(host="baz", period_beginning=begin2, period_ending=end2,
state="DONE", errors=0, task_items=17, message="test6"),
dict(host="bar", period_beginning=begin2, period_ending=end2,
state="RUNNING", errors=0, task_items=10, message="test7"),
dict(host="foo", period_beginning=begin2, period_ending=end2,
state="DONE", errors=0, task_items=7, message="test8"),
]
TEST_LOGS3 = [
#some errors..
dict(host="plonk", period_beginning=begin3, period_ending=end3,
state="DONE", errors=0, task_items=23, message="test9"),
dict(host="baz", period_beginning=begin3, period_ending=end3,
state="DONE", errors=2, task_items=17, message="test10"),
dict(host="bar", period_beginning=begin3, period_ending=end3,
state="DONE", errors=0, task_items=10, message="test11"),
dict(host="foo", period_beginning=begin3, period_ending=end3,
state="DONE", errors=1, task_items=7, message="test12"),
]
def fake_task_log_get_all(context, task_name, begin, end,
host=None, state=None):
assert task_name == "instance_usage_audit"
if begin == begin1 and end == end1:
return TEST_LOGS1
if begin == begin2 and end == end2:
return TEST_LOGS2
if begin == begin3 and end == end3:
return TEST_LOGS3
raise AssertionError("Invalid date %s to %s" % (begin, end))
def fake_last_completed_audit_period(unit=None, before=None):
audit_periods = [(begin3, end3),
(begin2, end2),
(begin1, end1)]
if before is not None:
for begin, end in audit_periods:
if before > end:
return begin, end
raise AssertionError("Invalid before date %s" % (before))
return begin1, end1
class InstanceUsageAuditLogTest(test.TestCase):
def setUp(self):
super(InstanceUsageAuditLogTest, self).setUp()
self.context = context.get_admin_context()
timeutils.set_time_override(datetime.datetime(2012, 7, 5, 10, 0, 0))
self.controller = ial.InstanceUsageAuditLogController()
self.host_api = self.controller.host_api
def fake_service_get_all(context, disabled):
self.assertTrue(disabled is None)
return TEST_COMPUTE_SERVICES
self.stubs.Set(utils, 'last_completed_audit_period',
fake_last_completed_audit_period)
self.stubs.Set(db, 'service_get_all',
fake_service_get_all)
self.stubs.Set(db, 'task_log_get_all',
fake_task_log_get_all)
def tearDown(self):
super(InstanceUsageAuditLogTest, self).tearDown()
timeutils.clear_time_override()
def test_index(self):
req = fakes.HTTPRequestV3.blank('/os-instance_usage_audit_log')
result = self.controller.index(req)
self.assertIn('instance_usage_audit_log', result)
logs = result['instance_usage_audit_log']
self.assertEquals(57, logs['total_instances'])
self.assertEquals(0, logs['total_errors'])
self.assertEquals(4, len(logs['log']))
self.assertEquals(4, logs['num_hosts'])
self.assertEquals(4, logs['num_hosts_done'])
self.assertEquals(0, logs['num_hosts_running'])
self.assertEquals(0, logs['num_hosts_not_run'])
self.assertEquals("ALL hosts done. 0 errors.", logs['overall_status'])
def test_index_with_format1(self):
before = urllib.quote("2012-07-05 10:00:00")
req = fakes.HTTPRequestV3.blank(
'/os-instance_usage_audit_log?before=' + before)
result = self.controller.index(req)
self.assertIn('instance_usage_audit_log', result)
logs = result['instance_usage_audit_log']
self.assertEquals(57, logs['total_instances'])
self.assertEquals(0, logs['total_errors'])
self.assertEquals(4, len(logs['log']))
self.assertEquals(4, logs['num_hosts'])
self.assertEquals(4, logs['num_hosts_done'])
self.assertEquals(0, logs['num_hosts_running'])
self.assertEquals(0, logs['num_hosts_not_run'])
self.assertEquals("ALL hosts done. 0 errors.", logs['overall_status'])
def test_index_with_format2(self):
before = urllib.quote('2012-07-05 10:00:00.10')
req = fakes.HTTPRequestV3.blank(
'/os-instance_usage_audit_log?before=' + before)
result = self.controller.index(req)
self.assertIn('instance_usage_audit_log', result)
logs = result['instance_usage_audit_log']
self.assertEquals(57, logs['total_instances'])
self.assertEquals(0, logs['total_errors'])
self.assertEquals(4, len(logs['log']))
self.assertEquals(4, logs['num_hosts'])
self.assertEquals(4, logs['num_hosts_done'])
self.assertEquals(0, logs['num_hosts_running'])
self.assertEquals(0, logs['num_hosts_not_run'])
self.assertEquals("ALL hosts done. 0 errors.", logs['overall_status'])
def test_index_with_invalid_format(self):
req = fakes.HTTPRequestV3.blank(
'/os-instance_usage_audit_log?before=abc')
self.assertRaises(exc.HTTPBadRequest, self.controller.index, req)
def test_index_with_running(self):
before = urllib.quote('2012-07-06 10:00:00')
req = fakes.HTTPRequestV3.blank(
'/os-instance_usage_audit_log?before=' + before)
result = self.controller.index(req)
self.assertIn('instance_usage_audit_log', result)
logs = result['instance_usage_audit_log']
self.assertEquals(57, logs['total_instances'])
self.assertEquals(0, logs['total_errors'])
self.assertEquals(4, len(logs['log']))
self.assertEquals(4, logs['num_hosts'])
self.assertEquals(3, logs['num_hosts_done'])
self.assertEquals(1, logs['num_hosts_running'])
self.assertEquals(0, logs['num_hosts_not_run'])
self.assertEquals("3 of 4 hosts done. 0 errors.",
logs['overall_status'])
def test_index_with_errors(self):
before = urllib.quote('2012-07-07 10:00:00')
req = fakes.HTTPRequestV3.blank(
'/os-instance_usage_audit_log?before=' + before)
result = self.controller.index(req)
self.assertIn('instance_usage_audit_log', result)
logs = result['instance_usage_audit_log']
self.assertEquals(57, logs['total_instances'])
self.assertEquals(3, logs['total_errors'])
self.assertEquals(4, len(logs['log']))
self.assertEquals(4, logs['num_hosts'])
self.assertEquals(4, logs['num_hosts_done'])
self.assertEquals(0, logs['num_hosts_running'])
self.assertEquals(0, logs['num_hosts_not_run'])
self.assertEquals("ALL hosts done. 3 errors.",
logs['overall_status'])
|
import unittest
from katas.kyu_6.regexp_basics_is_it_ipv4_address import ipv4_address
class IPV4AddressTestCase(unittest.TestCase):
def test_true(self):
self.assertTrue(ipv4_address('127.0.0.1'))
def test_true_2(self):
self.assertTrue(ipv4_address('0.0.0.0'))
def test_true_3(self):
self.assertTrue(ipv4_address('255.255.255.255'))
def test_true_4(self):
self.assertTrue(ipv4_address('10.20.30.40'))
def test_false(self):
self.assertFalse(ipv4_address(''))
def test_false_2(self):
self.assertFalse(ipv4_address('10.256.30.40'))
def test_false_3(self):
self.assertFalse(ipv4_address('10.20.030.40'))
def test_false_4(self):
self.assertFalse(ipv4_address('127.0.1'))
def test_false_5(self):
self.assertFalse(ipv4_address('127.0.0.0.1'))
def test_false_6(self):
self.assertFalse(ipv4_address('..255.255'))
def test_false_7(self):
self.assertFalse(ipv4_address('127.0.0.1\n'))
def test_false_8(self):
self.assertFalse(ipv4_address('\n127.0.0.1'))
def test_false_9(self):
self.assertFalse(ipv4_address(' 127.0.0.1'))
def test_false_10(self):
self.assertFalse(ipv4_address('127.0.0.1 '))
def test_false_11(self):
self.assertFalse(ipv4_address(' 127.0.0.1 '))
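# --- Hedged reference sketch (added; the kata's real implementation is not shown here) ---
# One regex that satisfies the cases above: four dot-separated octets in 0-255 with no
# leading zeros and no surrounding whitespace, matched against the entire string.
#
#   import re
#
#   OCTET = r'(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)'
#
#   def ipv4_address(address):
#       return bool(re.fullmatch(r'{0}(\.{0}){{3}}'.format(OCTET), address))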
|
import os
import pytest
from xebec.src import _validate as vd
def test_validate_table(data_paths, tmp_path):
err_biom = os.path.join(tmp_path, "err.biom")
with open(err_biom, "w") as f:
f.write("kachow")
with pytest.raises(ValueError) as exc_info:
vd.validate_table(err_biom)
exp_err_msg = "Table is empty!"
assert str(exc_info.value) == exp_err_msg
with pytest.raises(FileNotFoundError) as exc_info:
vd.validate_table("NOT A FILE")
exp_err_msg = "[Errno 2] No such file or directory: 'NOT A FILE'"
assert str(exc_info.value) == exp_err_msg
def test_validate_metadata(data_paths, tmp_path):
err_md = os.path.join(tmp_path, "err.tsv")
with open(err_md, "w") as f:
f.write("kerblam")
with pytest.raises(ValueError) as exc_info:
vd.validate_metadata(err_md)
exp_err_msg = "Metadata is empty!"
assert str(exc_info.value) == exp_err_msg
with pytest.raises(FileNotFoundError) as exc_info:
vd.validate_metadata("NOT A FILE")
exp_err_msg = "[Errno 2] No such file or directory: 'NOT A FILE'"
assert str(exc_info.value) == exp_err_msg
def test_validate_tree(data_paths, tmp_path):
with pytest.raises(FileNotFoundError) as exc_info:
vd.validate_tree("NOT A FILE")
exp_err_msg = "[Errno 2] No such file or directory: 'NOT A FILE'"
assert str(exc_info.value) == exp_err_msg
|
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from argparse import Namespace
import onnx
from extensions.front.onnx.pad_ext import PadFrontExtractor
from mo.graph.graph import Graph
from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
class TestPad(BaseExtractorsTestingClass):
@staticmethod
def _create_node(pads=None, value=None, mode=None):
if pads is None:
pads = [1, 2, 3, 4]
if value is None:
value = 0.0
if mode is None:
mode = 'constant'
pb = onnx.helper.make_node(
'Pad',
pads=pads,
mode=mode,
value=value,
inputs=['a'],
outputs=['b']
)
graph = Graph()
node = PB({'pb': pb, 'graph': graph})
return node
def test_ok(self):
node = self._create_node()
PadFrontExtractor.extract(node)
self.res = node
self.expected = {
'pads': [[1, 3], [2, 4]],
'mode': 'constant',
'fill_value': 0
}
self.compare()
def test_reflect(self):
node = self._create_node(mode='reflect')
PadFrontExtractor.extract(node)
self.res = node
self.expected = {
'pads': [[1, 3], [2, 4]],
'mode': 'reflect',
'fill_value': 0
}
self.compare()
def test_non_zero_fill_value(self):
node = self._create_node(value=1.0)
PadFrontExtractor.extract(node)
self.res = node
self.expected = {
'pads': [[1, 3], [2, 4]],
'mode': 'constant',
'fill_value': 1.0
}
self.compare()
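# --- Hedged note (added for illustration) ---
# ONNX stores 2-D pads as a flat [x1_begin, x2_begin, x1_end, x2_end] list, while the
# extractor's 'pads' attribute groups them per axis as [[begin, end], ...]. That is why
# pads=[1, 2, 3, 4] passed to _create_node() is expected to come out as [[1, 3], [2, 4]]
# in the tests above.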
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
import frappe.utils
from frappe.desk.page.setup_wizard.install_fixtures import update_global_search_doctypes
from frappe.test_runner import make_test_objects
from frappe.utils import global_search
class TestGlobalSearch(unittest.TestCase):
def setUp(self):
update_global_search_doctypes()
global_search.setup_global_search_table()
self.assertTrue("__global_search" in frappe.db.get_tables())
doctype = "Event"
global_search.reset()
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
make_property_setter(doctype, "subject", "in_global_search", 1, "Int")
make_property_setter(doctype, "event_type", "in_global_search", 1, "Int")
make_property_setter(doctype, "roles", "in_global_search", 1, "Int")
make_property_setter(doctype, "repeat_on", "in_global_search", 0, "Int")
def tearDown(self):
frappe.db.sql("DELETE FROM `tabProperty Setter` WHERE `doc_type`='Event'")
frappe.clear_cache(doctype="Event")
frappe.db.sql("DELETE FROM `tabEvent`")
frappe.db.sql("DELETE FROM `__global_search`")
make_test_objects("Event")
frappe.db.commit()
def insert_test_events(self):
frappe.db.sql("DELETE FROM `tabEvent`")
phrases = [
'"The Sixth Extinction II: Amor Fati" is the second episode of the seventh season of the American science fiction.',
"After Mulder awakens from his coma, he realizes his duty to prevent alien colonization. ",
"Carter explored themes of extraterrestrial involvement in ancient mass extinctions in this episode, the third in a trilogy.",
]
for text in phrases:
frappe.get_doc(
dict(doctype="Event", subject=text, repeat_on="Monthly", starts_on=frappe.utils.now_datetime())
).insert()
global_search.sync_global_search()
frappe.db.commit()
def test_search(self):
self.insert_test_events()
results = global_search.search("awakens")
self.assertTrue(
"After Mulder awakens from his coma, he realizes his duty to prevent alien colonization. "
in results[0].content
)
results = global_search.search("extraterrestrial")
self.assertTrue(
"Carter explored themes of extraterrestrial involvement in ancient mass extinctions in this episode, the third in a trilogy."
in results[0].content
)
results = global_search.search("awakens & duty & alien")
self.assertTrue(
"After Mulder awakens from his coma, he realizes his duty to prevent alien colonization. "
in results[0].content
)
def test_update_doc(self):
self.insert_test_events()
test_subject = "testing global search"
event = frappe.get_doc("Event", frappe.get_all("Event")[0].name)
event.subject = test_subject
event.save()
frappe.db.commit()
global_search.sync_global_search()
results = global_search.search("testing global search")
self.assertTrue("testing global search" in results[0].content)
def test_update_fields(self):
self.insert_test_events()
results = global_search.search("Monthly")
self.assertEqual(len(results), 0)
doctype = "Event"
from frappe.custom.doctype.property_setter.property_setter import make_property_setter
make_property_setter(doctype, "repeat_on", "in_global_search", 1, "Int")
global_search.rebuild_for_doctype(doctype)
results = global_search.search("Monthly")
self.assertEqual(len(results), 3)
def test_delete_doc(self):
self.insert_test_events()
event_name = frappe.get_all("Event")[0].name
event = frappe.get_doc("Event", event_name)
test_subject = event.subject
results = global_search.search(test_subject)
self.assertTrue(
any(r["name"] == event_name for r in results), msg="Failed to search document by exact name"
)
frappe.delete_doc("Event", event_name)
global_search.sync_global_search()
results = global_search.search(test_subject)
self.assertTrue(
all(r["name"] != event_name for r in results),
msg="Deleted documents appearing in global search.",
)
def test_insert_child_table(self):
frappe.db.sql("delete from tabEvent")
phrases = [
"Hydrus is a small constellation in the deep southern sky. ",
"It was first depicted on a celestial atlas by Johann Bayer in his 1603 Uranometria. ",
"The French explorer and astronomer Nicolas Louis de Lacaille charted the brighter stars and gave their Bayer designations in 1756. ",
'Its name means "male water snake", as opposed to Hydra, a much larger constellation that represents a female water snake. ',
"It remains below the horizon for most Northern Hemisphere observers.",
"The brightest star is the 2.8-magnitude Beta Hydri, also the closest reasonably bright star to the south celestial pole. ",
"Pulsating between magnitude 3.26 and 3.33, Gamma Hydri is a variable red giant some 60 times the diameter of our Sun. ",
"Lying near it is VW Hydri, one of the brightest dwarf novae in the heavens. ",
"Four star systems have been found to have exoplanets to date, most notably HD 10180, which could bear up to nine planetary companions.",
]
for text in phrases:
doc = frappe.get_doc(
{"doctype": "Event", "subject": text, "starts_on": frappe.utils.now_datetime()}
)
doc.insert()
global_search.sync_global_search()
frappe.db.commit()
def test_get_field_value(self):
cases = [
{
"case_type": "generic",
"data": """
<style type="text/css"> p.p1 {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px 'Open Sans';
-webkit-text-stroke: #000000} span.s1 {font-kerning: none} </style>
<script>
var options = {
foo: "bar"
}
</script>
<p class="p1"><span class="s1">Contrary to popular belief, Lorem Ipsum is not simply random text. It has
roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock,
a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur,
from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source.
Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum et Malorum" (The Extremes of Good and Evil) by Cicero,
written in 45 BC. This book is a treatise on the theory of ethics, very popular during the Renaissance. The first line of Lorem Ipsum,
"Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.</span></p>
""",
"result": (
"Description : Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical "
"Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, "
"looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word "
'in classical literature, discovered the undoubtable source. Lorem Ipsum comes from sections 1.10.32 and 1.10.33 of "de Finibus Bonorum '
'et Malorum" (The Extremes of Good and Evil) by Cicero, written in 45 BC. This book is a treatise on the theory of ethics, very popular '
'during the Renaissance. The first line of Lorem Ipsum, "Lorem ipsum dolor sit amet..", comes from a line in section 1.10.32.'
),
},
{
"case_type": "with_style",
"data": """
<style type="text/css"> p.p1 {margin: 0.0px 0.0px 0.0px 0.0px; font: 14.0px 'Open Sans';
-webkit-text-stroke: #000000} span.s1 {font-kerning: none} </style>Lorem Ipsum Dolor Sit Amet
""",
"result": "Description : Lorem Ipsum Dolor Sit Amet",
},
{
"case_type": "with_script",
"data": """
<script>
var options = {
foo: "bar"
}
</script>
Lorem Ipsum Dolor Sit Amet
""",
"result": "Description : Lorem Ipsum Dolor Sit Amet",
},
]
for case in cases:
doc = frappe.get_doc(
{
"doctype": "Event",
"subject": "Lorem Ipsum",
"starts_on": frappe.utils.now_datetime(),
"description": case["data"],
}
)
field_as_text = ""
for field in doc.meta.fields:
if field.fieldname == "description":
field_as_text = global_search.get_formatted_value(doc.description, field)
self.assertEqual(case["result"], field_as_text)
def test_web_page_index(self):
global_search.update_global_search_for_all_web_pages()
global_search.sync_global_search()
frappe.db.commit()
results = global_search.web_search("unsubscribe")
self.assertTrue("Unsubscribe" in results[0].content)
results = global_search.web_search(
text="unsubscribe", scope='manufacturing" UNION ALL SELECT 1,2,3,4,doctype from __global_search'
)
self.assertTrue(results == [])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Author : Jessy JOSE -- Pierre VAUDRY
IPSA Aero1 - Prim2
Release date: 09/12/2020
[other information]
Licence: MIT
[Description]
SMC is a secure message communication tool.
This program is the server part of the project.
The server is built on the socket module.
To manage communication between the clients and the server, the select module is used to pick the sockets that are ready.
The datetime, os and platform modules make the server fully functional: they are used to timestamp
operations and to clear the console when needed, depending on the platform in use.
[Functions]:
    Clean() -- clear the console
    documentation() -- print the built-in documentation and the basic screen interface
    log() -- log all data received by the server
    log_connection() -- log every connection made to the server
    process_server() -- interpret and process the received data
    list_log() -- convert the data in the log text file to a list
    str_log(data) -- convert a list to a str
    consoleCommand() -- provide a small console after communication to inspect log.txt
    connection_server() -- main process of the server
    run() -- run and launch the server
[Global variable]:
{int variable}
PORT
{str variable}
HOST
affichage_logo
{dict variable}
server_data
{list variable}
client_connected
[Other variable]:
    Many other constants and variables may be defined; these may be used in calls to
the process_server(), list_log(), str_log(data), consoleCommand() and connection_server() functions
"""
# ---------------------------------------------Import module section-------------------------------------------------- #
import datetime
import select
import socket
import os
import platform
# ------------------------------------------------Global variable----------------------------------------------------- #
# Definition of the local server variables
# HOST is the local address the server binds to
HOST = '127.0.0.1'
# PORT is the port the clients use to talk to the server
PORT = 50100
# List used to store the clients connected to the server
client_connected = []
# server_data is a dict used to keep a count of clients
server_data = {
'count': 0
}
# ------------------------------------------------Functions & process------------------------------------------------- #
def Clean():
"""
[description]
    Clean() clears the main console.
:return: none
"""
if platform.system() == "Windows":
os.system("cls")
elif platform.system() == "Linux":
os.system("clear")
def documentation():
"""
[Description]
    Print basic built-in documentation for the server administrator, together with a large ASCII-art
    banner.
:return: none
"""
affichage_logo = '\033[36m' + """
___ _ _ __ __ _____ _ _ _
/ ____| (_) | | \/ | / ____| (_) | | (_)
| (___ ___ ___ _ _ _ __ _| |_ _ _ | \ / | ___ ___ ___ __ _ __ _ ___ | | ___ _ __ ___ _ __ ___ _ _ _ __ _ ___ __ _| |_ _ ___ _ __
\___ \ / _ \/ __| | | | '__| | __| | | | | |\/| |/ _ \/ __/ __|/ _` |/ _` |/ _ \ | | / _ \| '_ ` _ \| '_ ` _ \| | | | '_ \| |/ __/ _` | __| |/ _ \| '_ \
____) | __/ (__| |_| | | | | |_| |_| | | | | | __/\__ \__ \ (_| | (_| | __/ | |___| (_) | | | | | | | | | | | |_| | | | | | (_| (_| | |_| | (_) | | | |
|_____/ \___|\___|\__,_|_| |_|\__|\__, | |_| |_|\___||___/___/\__,_|\__, |\___| \_____\___/|_| |_| |_|_| |_| |_|\__,_|_| |_|_|\___\__,_|\__|_|\___/|_| |_|
__/ | __/ |
|___/ |___/
/%&@@@@@&%/
@@@@@@@@&&(((((&&@@@@@@@@.
@@@@@,,,,,,,,,,,,,,,,,,,,,,,@@@@@
@@@@,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,@@@@
&@@@,,,,,,,,,,,,,,%@*,,%/%@%***@,,,,,,,&@@&
@@@(@@@@@@@@@@@*,,,,*,,,,,,,,,,,,,,,,,,,,,@@@
&@@@@@@@&,,,.....%@@@@@@@@*,,,,,,,,,,,,,,,,,,,,@@@
(@@@@&(#((***,,,,....,.......@@@@@,,,,,,,,,,,,,,,,,@@@
@@@@*,#*((/(/(/,,,,,,,,...,.... ,@@@&,,,,,,,,,,,,,,@@@
@@@,,/,,(*,/%/(((*,,,,.,....,.,.. ,..,@@@%,,,,,,,,,,,&@@
@@@./. ..*(((/#//***,*,,,,*,,*.. ..,, . @@@,,,,,,,,,@@@
@@@#,/**..*@@@#(//***#@@&,*,.,,.&@@#. ,,.. .@@%,,,,,,@@@#
@@(*%(,,/@@@(@@@%/*#@@@/@@@**.@@@/&@@(.. . @@@,,/@@@@
@@@#.,(/,@@@@@@@(..*@@@@@@@(..@@@@@@@ @@@@@@@
,@@(/((*#**#/(/, .*, ..&*////(,, @@@
@@@ */*(/*/. ..... . ... /*(.,.,, .@@@
%@@@. . . .., ., . . *,**.*, #@@@
@@@@(. .., ,. ,. .@@@
%@@@@@@(,.. ,. .. . .&@@@ , &@@
%@@@@@@@@@@@@@@@* @@@. (@@
@@@.@@.
@@@@.
@@.
""" + '\033[39m'
print(affichage_logo)
print('\t\t\t\t\t\t\t\t\t\t\t\t#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#')
    print('\t\t\t\t\t\t\t\t\t\t\t\t# ' + '\033[31m' + 'Welcome to SMC server' + '\033[39m' +
' #')
print('\t\t\t\t\t\t\t\t\t\t\t\t#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#')
    print('More documentation to come...')
def log(data):
"""
[description]
    log() keeps a history file of all the conversations between clients on the server.
    It takes a str value sent by a client and appends it to a text file.
:param data: str value given by client to the server
:return: none
"""
    # Open the log file as lmsg, write the data followed by a newline, then close the file
with open("log.txt", "a") as lmsg:
lmsg.write(data + "\n")
lmsg.close()
def log_connection(client_connected):
"""
[description]
    log_connection() keeps a history file of every client connection to the server.
    It takes the socket object provided by the socket module and records it in a text file.
    :param client_connected: socket object provided by the socket module
:return: none
"""
    # Open the connection log as lc and write the server's current datetime plus the client information as a string,
    # then close the file
with open("log_connection.txt", "a") as lc:
lc.write(datetime.datetime.isoformat(datetime.datetime.now()) +
"> {} connected on server \n".format(client_connected))
lc.close()
def process_server(data):
"""
[description]
    process_server() processes the data sent by a client.
    It is the part of the server that consumes client data.
    Unless the client sent the '/stop' command, the message is written to the log.
    :param data: str value received from a client in the server's main loop, where incoming data is handled
    :return: response, a str that is currently unused by the rest of the program (no feature consumes it yet)
"""
# Rename data as response and use it in log process
response = data
if response != '/stop':
log(response)
else:
pass
return response
def list_log():
"""
[description]
    list_log() converts the log text file into a list.
    It opens log.txt, reads every line,
    and appends each split line to the output list named llog.
    :return: llog, the list built from the data in the text file
"""
    # Open the log file as lg and create a list named llog.
    # For each line, strip the trailing newline into s, split it on the '@' separator into lastL,
    # then append lastL to llog and close the file
with open('log.txt', 'r') as lg:
llog = []
for line in lg:
s = line.strip("\n")
lastL = s.split("@")
llog.append(lastL)
lg.close()
return llog
def str_log(data):
"""
[description]
    str_log() converts a list of log entries back into str data.
    It joins the elements of the entries to build a string,
    but only the last line ends up being returned and used.
    :param data: list of all the data exchanged between client and server
    :return: str built from the last entry of the log
"""
    # Create an empty local variable named str_l
    # For every entry, join the elements of data[i - 1] with ','; only the value from the last iteration is kept
str_l = ''
for i in range(len(data)):
for j in range(len(data[i])):
str_l = ','.join(data[i - 1])
return str_l
def consoleCommand(event):
"""
[description]
:param event:
:return:
"""
if event == '/log.txt':
log = open("./log.txt", "r")
contenu = log.read()
print(contenu)
else:
exit()
# This process is named connection_server because client connextion's are logged and used by server.
# connection_server is a inspired of send() process but the code is not the same.
def connection_server():
"""
[description]
    connection_server() is the main process of the server program.
    It uses the socket module to create the server and is the core of the program.
    It reads the global HOST and PORT values to bind and launch the server and to listen for every connection on the
    socket.
    It accepts connections, waits for incoming data, and processes it with the
    process_server() function.
AF_INET represents the IPv4 address family.
SOCK_STREAM represents the TCP protocol.
:return: none
"""
# Creating a socket, by creating a socket object named s.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Allows to reuse the same address
# s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# s.bind (address,port) binds an address and a port to socket s.
# The address parameter is a tuple consisting of the IP address of the
# server and a port number.
# s.bind((data_server['HOST'], data_server['PORT']))
s.bind((HOST, PORT))
# s.listen () makes the server ready to accept connections.
s.listen(5)
    print('{server-status}:', '\033[32m', 'Online', '\033[1m')
print(s)
# Variable that starts the while loop
server_ready = True
turn = server_data['count']
while server_ready:
        # wait_connections holds the sockets with clients waiting to connect and talk to the server
        # select lets the first waiting client connect to the server, polling every 0.05 seconds
        wait_connections, wlist, xlist = select.select([s], [], [], 0.05)
# for connection in wait connections, accept the first client who wants to connect with server
# and append this client in list of connected_client, print connection_client in the console and log this
# connection with the process log_connection
for connection in wait_connections:
connection_client, info_connection = connection.accept()
client_connected.append(connection_client)
            print("Position : ", turn, " | Client : ", connection_client)
turn = turn + 1
log_connection(connection_client)
####################################################################################################################
        # Create an empty list read_client
        read_client = []
        # Ignore possible select errors so that the rest of the program keeps running
try:
read_client, wlist, xlist = select.select(client_connected, [], [], 0.05)
except select.error:
pass
else:
# for client in read_client, receive message of this client, and use process_server to record the message
for client in read_client:
msg_recv = client.recv(1024)
msg_recv = msg_recv.decode()
process_server(msg_recv)
print('\033[39m', '[', '\033[31m', 'SERVER@', '\033[36m', HOST, '\033[33m', '-p ', str(PORT),
'\033[39m', ']: Client send a message. Go to ./log.txt to see more.')
if msg_recv == "/stop":
server_ready = False
break
###############################################
                # Open the log, convert it to a list, then to a string, and take the last message
d_l = list_log()
c2c = str_log(d_l)
p_server = c2c
###############################################
                # encode the message given by the server
byte_data = p_server.encode()
# send message to the client
client.sendall(byte_data)
####################################################################################################################
#console = input("[" + datetime.datetime.isoformat(datetime.datetime.now()) + "](/log.txt to see log in server)>")
#consoleCommand(console) # This line, give to the administrator the console to oppen and see log
print("Close all connections")
    # Disconnect every connected client
for client in client_connected:
client.close()
    # Close the server socket once the while loop has finished
s.close()
Clean()
def run():
"""
[description]
Run process
:return: none
"""
print('[' + '\033[31m' + 'SERVER@' + '\033[36m' + HOST + ' ' + '\033[33m' + '-p ' + str(PORT) + '\033[39m' + ']:\n')
while True:
connection_server()
# -------------------------------------------Run & Start server program----------------------------------------------- #
if __name__ == '__main__':
# Give basic and native documentation in console
documentation()
# Run the program
run()
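# ----------------------------------------Hedged client sketch (added)------------------------------------------------- #
# The module docstring refers to a companion client program that is not included here.
# A minimal client compatible with this server could look roughly like the sketch below:
# connect to HOST:PORT, send a message, read the reply, and send '/stop' to shut the
# server loop down. This is an illustration only, not the project's real client.
#
#   import socket
#
#   def minimal_client(message='hello from client'):
#       with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as c:
#           c.connect(('127.0.0.1', 50100))
#           c.sendall(message.encode())
#           print(c.recv(1024).decode())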
|
# Django settings for test_project project.
import os
def map_path(directory_name):
return os.path.join(os.path.dirname(__file__),
'../' + directory_name).replace('\\', '/')
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': map_path('database/test_project.sqlite'),
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': ''
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '8**a!c8$1x)p@j2pj0yq!*v+dzp24g*$918ws#x@k+gf%0%rct'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'sample_project.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'sample_project.wsgi.application'
TEMPLATE_DIRS = (
map_path('templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'adminsortable',
'app',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
import math
from typing import Iterable
from .base import BaseMeasure
class OverlapMeasure(BaseMeasure):
def __init__(self, db=None, maxsize: int = 100) -> None:
super().__init__()
if db:
self.maxsize = db.max_feature_size()
else:
self.maxsize = maxsize
def min_feature_size(self, query_size, alpha) -> int:
# return 1 # Not sure the below isn't sufficient
return math.floor(query_size * alpha) or 1
def max_feature_size(self, query_size, alpha) -> int:
return self.maxsize
def minimum_common_feature_count(
self, query_size: int, y_size: int, alpha: float
) -> int:
return int(math.ceil(alpha * min(query_size, y_size)))
def similarity(self, X: Iterable[str], Y: Iterable[str]) -> int:
return min(len(set(X)), len(set(Y)))
class LeftOverlapMeasure(BaseMeasure):
def __init__(self, db=None, maxsize: int = 100) -> None:
super().__init__()
if db:
self.maxsize = db.max_feature_size()
else:
self.maxsize = maxsize
def min_feature_size(self, query_size, alpha) -> int:
return math.floor(query_size * alpha) or 1
def max_feature_size(self, query_size, alpha) -> int:
return self.maxsize
def minimum_common_feature_count(
self, query_size: int, y_size: int, alpha: float
) -> int:
return math.floor(query_size * alpha) or 1
def similarity(self, X: Iterable[str], Y: Iterable[str]) -> float:
return 1 - len(set(X) - set(Y)) / len(set(X))
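# --- Hedged usage example (added; BaseMeasure and the optional db argument are defined elsewhere) ---
# OverlapMeasure.similarity() is simply the size of the smaller feature set, while
# LeftOverlapMeasure reports the fraction of X's features that also appear in Y.
if __name__ == "__main__":
    x = ["ab", "bc", "cd"]
    y = ["bc", "cd", "de", "ef"]
    assert OverlapMeasure().similarity(x, y) == 3
    assert LeftOverlapMeasure().similarity(x, y) == 1 - 1 / 3  # only "ab" is missing from Y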
|
from fastapi import Depends, HTTPException, status, Header
from fastapi.security import OAuth2PasswordBearer
from pydantic import ValidationError
from jose import jwt
from webapi.db.config import async_session
from webapi.db import models, schemas
from webapi.db.dals.user_dal import UserDAL
from webapi.setting import settings
from webapi.utils import security
reusable_oauth2 = OAuth2PasswordBearer(
    tokenUrl='/api/admin/login/access_token/'
)
class DALGetter:
def __init__(self, dal_cls):
self.dal_cls = dal_cls
async def __call__(self):
async with async_session() as session:
async with session.begin():
yield self.dal_cls(session)
async def get_current_user(
dal: UserDAL = Depends(DALGetter(UserDAL)), token: str = Depends(reusable_oauth2)
) -> models.User:
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(
token, settings.SECRET_KEY, algorithms=[security.ALGORITHM]
)
token_data = schemas.token.TokenPayload(**payload)
except (jwt.JWTError, ValidationError):
raise credentials_exception
user = await dal.get(id=token_data.sub)
if user is None:
raise credentials_exception
return user
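# Usage sketch (the router and path below are illustrative, not part of this module):
#
#     from fastapi import APIRouter
#
#     router = APIRouter()
#
#     @router.get('/api/admin/me')
#     async def read_current_user(user: models.User = Depends(get_current_user)):
#         return user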
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Spaghetti: Web Application Security Scanner
#
# @url: https://github.com/m4ll0k/Spaghetti
# @author: Momo Outaadi (M4ll0k)
# @license: See the file 'doc/LICENSE'
import re
class Binarysec():
@staticmethod
def Run(headers):
_ = False
try:
for item in list(headers.items()):
                _ = re.search(r'BinarySec', item[1], re.I) is not None
                _ |= re.search(r'x-binarysec-(?:via|nocache)', item[0], re.I) is not None
                if _:
                    return "BinarySEC Web Application Firewall (BinarySEC)"
except Exception as ERROR:
print(ERROR)
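# Usage sketch (the response headers below are illustrative):
#
#     headers = {'Server': 'BinarySec/3.2', 'x-binarysec-via': 'cache-1'}
#     print(Binarysec.Run(headers))  # -> "BinarySEC Web Application Firewall (BinarySEC)"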
|
import RPi.GPIO as GPIO
import time
#0 #1 #2 #3 #4 #5 #6 #7 #8 #9 #10 #11 #12 #13
#list=[261.6256, 293.6648, 329.6276, 349.2282, 391.9954, 440, 493.8833, 523.2511, 587.3295, 659.2551, 698.4565, 783.9909, 880, 987.7666]
#num=[2,4,5,8,8,7,6,5,6,4,6,8,11,11,11,12,10,9,10,5]
plane=[330, 294, 261, 294, 330, 330, 330, 294, 294, 294,
330, 392, 392]
buzzer_pin=17
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(buzzer_pin,GPIO.OUT)
try:
    pwm = GPIO.PWM(buzzer_pin, 100)
    pwm.start(100)
    pwm.ChangeDutyCycle(90)
    for i in plane:
        pwm.ChangeFrequency(i)
        time.sleep(0.5)
    # for i in range(len(num)):
    #     pwm.ChangeFrequency(list[num[i]])
    #     time.sleep(1)
except KeyboardInterrupt:
    pass
finally:
    # Always stop PWM and release the GPIO pin, whether the tune finished or was interrupted.
    pwm.stop()
    GPIO.cleanup()
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteFeature
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeature_sync]
from google.cloud import aiplatform_v1beta1
def sample_delete_feature():
# Create a client
client = aiplatform_v1beta1.FeaturestoreServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteFeatureRequest(
name="name_value",
)
# Make the request
operation = client.delete_feature(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_DeleteFeature_sync]
|
# Copyright © 2021 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bring in the common JWT Manager."""
from functools import wraps
from http import HTTPStatus
from flask import g, request
from flask_jwt_oidc import JwtManager
from jose import jwt as josejwt
jwt = (
JwtManager()
) # pylint: disable=invalid-name; lower case name as used by convention in most Flask apps
class Auth:
"""Extending JwtManager to include additional functionalities."""
@classmethod
def require(cls, f):
"""Validate the Bearer Token."""
@jwt.requires_auth
@wraps(f)
def decorated(*args, **kwargs):
g.authorization_header = request.headers.get("Authorization", None)
g.token_info = g.jwt_oidc_token_info
return f(*args, **kwargs)
return decorated
@classmethod
def ismemberofgroups(cls, groups):
"""Check that at least one of the realm groups are in the token.
Args:
groups [str,]: Comma separated list of valid roles
"""
def decorated(f):
            # Token verification is commented out here with the expectation that this decorator is used in conjunction with require.
#@Auth.require
@wraps(f)
def wrapper(*args, **kwargs):
_groups = groups.split(',')
token = jwt.get_token_auth_header()
unverified_claims = josejwt.get_unverified_claims(token)
usergroups = unverified_claims['groups']
usergroups = [usergroup.replace('/','',1) if usergroup.startswith('/') else usergroup for usergroup in usergroups]
                if any(group in usergroups for group in _groups):
                    return f(*args, **kwargs)
                return "Unauthorized", 401
return wrapper
return decorated
auth = (
Auth()
)
class AuthHelper:
@classmethod
def getuserid(cls):
token = request.headers.get("Authorization", None)
unverified_claims = josejwt.get_unverified_claims(token.partition("Bearer")[2].strip())
return unverified_claims['preferred_username']
@classmethod
def getusername(cls):
token = request.headers.get("Authorization", None)
unverified_claims = josejwt.get_unverified_claims(token.partition("Bearer")[2].strip())
return unverified_claims['name']
@classmethod
def isministrymember(cls):
token = request.headers.get("Authorization", None)
unverified_claims = josejwt.get_unverified_claims(token.partition("Bearer")[2].strip())
usergroups = unverified_claims['groups']
usergroups = [usergroup.replace('/','',1) if usergroup.startswith('/') else usergroup for usergroup in usergroups]
for group in usergroups:
if group.endswith("Ministry Team"):
return True
return False
@classmethod
def getusergroups(cls):
token = request.headers.get("Authorization", None)
unverified_claims = josejwt.get_unverified_claims(token.partition("Bearer")[2].strip())
usergroups = unverified_claims['groups']
usergroups = [usergroup.replace('/','',1) if usergroup.startswith('/') else usergroup for usergroup in usergroups]
return usergroups
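# Usage sketch (the blueprint, route and group names below are illustrative only):
#
#     from flask import Blueprint, jsonify
#
#     bp = Blueprint('example', __name__)
#
#     @bp.route('/protected')
#     @auth.require
#     @auth.ismemberofgroups('Example Ministry Team,Another Team')
#     def protected():
#         return jsonify(user=AuthHelper.getuserid(), groups=AuthHelper.getusergroups())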
|
#!/usr/bin/env python
"""Memory Map File Analyser for ARM mbed"""
import sys
import os
import re
import csv
import json
import argparse
from prettytable import PrettyTable
from tools.utils import argparse_filestring_type, \
argparse_lowercase_hyphen_type, argparse_uppercase_type
DEBUG = False
RE_ARMCC = re.compile(
r'^\s+0x(\w{8})\s+0x(\w{8})\s+(\w+)\s+(\w+)\s+(\d+)\s+[*]?.+\s+(.+)$')
RE_IAR = re.compile(
r'^\s+(.+)\s+(zero|const|ro code|inited|uninit)\s'
r'+0x(\w{8})\s+0x(\w+)\s+(.+)\s.+$')
class MemapParser(object):
"""An object that represents parsed results, parses the memory map files,
and writes out different file types of memory results
"""
print_sections = ('.text', '.data', '.bss')
misc_flash_sections = ('.interrupts', '.flash_config')
other_sections = ('.interrupts_ram', '.init', '.ARM.extab',
'.ARM.exidx', '.ARM.attributes', '.eh_frame',
'.init_array', '.fini_array', '.jcr', '.stab',
'.stabstr', '.ARM.exidx', '.ARM')
# sections to print info (generic for all toolchains)
sections = ('.text', '.data', '.bss', '.heap', '.stack')
def __init__(self):
""" General initialization
"""
# list of all modules and their sections
self.modules = dict()
        # sections must be defined in this order so that irrelevant entries are filtered out
self.all_sections = self.sections + self.other_sections + \
self.misc_flash_sections + ('unknown', 'OUTPUT')
        # list of all object files and their mapping to module names
self.object_to_module = dict()
# Memory usage summary structure
self.mem_summary = dict()
def module_add(self, module_name, size, section):
""" Adds a module / section to the list
Positional arguments:
module_name - name of the module to add
size - the size of the module being added
section - the section the module contributes to
"""
if module_name in self.modules:
self.modules[module_name][section] += size
else:
temp_dic = dict()
for section_idx in self.all_sections:
temp_dic[section_idx] = 0
temp_dic[section] = size
self.modules[module_name] = temp_dic
def check_new_section_gcc(self, line):
""" Check whether a new section in a map file has been detected (only
applies to gcc)
Positional arguments:
line - the line to check for a new section
"""
for i in self.all_sections:
if line.startswith(i):
                # return the name of the section (assuming it is a known one)
return i
if line.startswith('.'):
            return 'unknown'  # all other dot-sections are classified as unknown
else:
return False # everything else, means no change in section
@staticmethod
def path_object_to_module_name(txt):
""" Parse a path to object file to extract it's module and object data
Positional arguments:
txt - the path to parse the object and module name from
"""
txt = txt.replace('\\', '/')
rex_mbed_os_name = r'^.+mbed-os\/(.+)\/(.+\.o)$'
test_rex_mbed_os_name = re.match(rex_mbed_os_name, txt)
if test_rex_mbed_os_name:
object_name = test_rex_mbed_os_name.group(2)
data = test_rex_mbed_os_name.group(1).split('/')
ndata = len(data)
if ndata == 1:
module_name = data[0]
else:
module_name = data[0] + '/' + data[1]
return [module_name, object_name]
else:
return ['Misc', ""]
def parse_section_gcc(self, line):
""" Parse data from a section of gcc map file
examples:
0x00004308 0x7c ./.build/K64F/GCC_ARM/mbed-os/hal/targets/hal/TARGET_Freescale/TARGET_KPSDK_MCUS/spi_api.o
.text 0x00000608 0x198 ./.build/K64F/GCC_ARM/mbed-os/core/mbed-rtos/rtx/TARGET_CORTEX_M/TARGET_RTOS_M4_M7/TOOLCHAIN_GCC/HAL_CM4.o
Positional arguments:
line - the line to parse a section from
"""
rex_address_len_name = re.compile(
r'^\s+.*0x(\w{8,16})\s+0x(\w+)\s(.+)$')
test_address_len_name = re.match(rex_address_len_name, line)
if test_address_len_name:
if int(test_address_len_name.group(2), 16) == 0: # size == 0
return ["", 0] # no valid entry
else:
m_name, _ = self.path_object_to_module_name(
test_address_len_name.group(3))
m_size = int(test_address_len_name.group(2), 16)
return [m_name, m_size]
else: # special corner case for *fill* sections
# example
# *fill* 0x0000abe4 0x4
rex_address_len = r'^\s+\*fill\*\s+0x(\w{8,16})\s+0x(\w+).*$'
test_address_len = re.match(rex_address_len, line)
if test_address_len:
if int(test_address_len.group(2), 16) == 0: # size == 0
return ["", 0] # no valid entry
else:
m_name = 'Fill'
m_size = int(test_address_len.group(2), 16)
return [m_name, m_size]
else:
return ["", 0] # no valid entry
def parse_map_file_gcc(self, file_desc):
""" Main logic to decode gcc map files
Positional arguments:
file_desc - a stream object to parse as a gcc map file
"""
current_section = 'unknown'
with file_desc as infile:
# Search area to parse
for line in infile:
if line.startswith('Linker script and memory map'):
current_section = "unknown"
break
# Start decoding the map file
for line in infile:
change_section = self.check_new_section_gcc(line)
if change_section == "OUTPUT": # finish parsing file: exit
break
elif change_section != False:
current_section = change_section
[module_name, module_size] = self.parse_section_gcc(line)
if module_size == 0 or module_name == "":
pass
else:
self.module_add(module_name, module_size, current_section)
if DEBUG:
print "Line: %s" % line,
print "Module: %s\tSection: %s\tSize: %s" % \
(module_name, current_section, module_size)
raw_input("----------")
def parse_section_armcc(self, line):
""" Parse data from an armcc map file
Examples of armcc map file:
Base_Addr Size Type Attr Idx E Section Name Object
0x00000000 0x00000400 Data RO 11222 RESET startup_MK64F12.o
0x00000410 0x00000008 Code RO 49364 * !!!main c_w.l(__main.o)
Positional arguments:
line - the line to parse the section data from
"""
test_rex_armcc = re.match(RE_ARMCC, line)
if test_rex_armcc:
size = int(test_rex_armcc.group(2), 16)
if test_rex_armcc.group(4) == 'RO':
section = '.text'
else:
if test_rex_armcc.group(3) == 'Data':
section = '.data'
elif test_rex_armcc.group(3) == 'Zero':
section = '.bss'
else:
print "BUG armcc map parser"
raw_input()
# lookup object in dictionary and return module name
object_name = test_rex_armcc.group(6)
if object_name in self.object_to_module:
module_name = self.object_to_module[object_name]
else:
module_name = 'Misc'
return [module_name, size, section]
else:
return ["", 0, ""] # no valid entry
def parse_section_iar(self, line):
""" Parse data from an IAR map file
Examples of IAR map file:
Section Kind Address Size Object
.intvec ro code 0x00000000 0x198 startup_MK64F12.o [15]
.rodata const 0x00000198 0x0 zero_init3.o [133]
.iar.init_table const 0x00008384 0x2c - Linker created -
Initializer bytes const 0x00000198 0xb2 <for P3 s0>
.data inited 0x20000000 0xd4 driverAtmelRFInterface.o [70]
.bss zero 0x20000598 0x318 RTX_Conf_CM.o [4]
.iar.dynexit uninit 0x20001448 0x204 <Block tail>
HEAP uninit 0x20001650 0x10000 <Block tail>
        Positional arguments:
line - the line to parse section data from
"""
test_rex_iar = re.match(RE_IAR, line)
if test_rex_iar:
size = int(test_rex_iar.group(4), 16)
if test_rex_iar.group(2) == 'const' or \
test_rex_iar.group(2) == 'ro code':
section = '.text'
elif test_rex_iar.group(2) == 'zero' or \
test_rex_iar.group(2) == 'uninit':
if test_rex_iar.group(1)[0:4] == 'HEAP':
section = '.heap'
elif test_rex_iar.group(1)[0:6] == 'CSTACK':
section = '.stack'
else:
section = '.bss' # default section
elif test_rex_iar.group(2) == 'inited':
section = '.data'
else:
print "BUG IAR map parser"
raw_input()
# lookup object in dictionary and return module name
object_name = test_rex_iar.group(5)
if object_name in self.object_to_module:
module_name = self.object_to_module[object_name]
else:
module_name = 'Misc'
return [module_name, size, section]
else:
return ["", 0, ""] # no valid entry
def parse_map_file_armcc(self, file_desc):
""" Main logic to decode armc5 map files
Positional arguments:
file_desc - a file like object to parse as an armc5 map file
"""
with file_desc as infile:
# Search area to parse
for line in infile:
if line.startswith(' Base Addr Size'):
break
# Start decoding the map file
for line in infile:
[name, size, section] = self.parse_section_armcc(line)
if size == 0 or name == "" or section == "":
pass
else:
self.module_add(name, size, section)
def parse_map_file_iar(self, file_desc):
""" Main logic to decode IAR map files
Positional arguments:
file_desc - a file like object to parse as an IAR map file
"""
with file_desc as infile:
# Search area to parse
for line in infile:
if line.startswith(' Section '):
break
# Start decoding the map file
for line in infile:
[name, size, section] = self.parse_section_iar(line)
if size == 0 or name == "" or section == "":
pass
else:
self.module_add(name, size, section)
def search_objects(self, path, toolchain):
""" Check whether the specified map file matches with the toolchain.
Searches for object files and creates mapping: object --> module
Positional arguments:
path - the path to an object file
toolchain - the toolchain used to build the object file
"""
path = path.replace('\\', '/')
# check location of map file
rex = r'^(.+\/)' + re.escape(toolchain) + r'\/(.+\.map)$'
test_rex = re.match(rex, path)
if test_rex:
search_path = test_rex.group(1) + toolchain + '/mbed-os/'
else:
            # It looks like this is not an mbed project
# object-to-module mapping cannot be generated
print "Warning: specified toolchain doesn't match with"\
" path to the memory map file."
return
for root, _, obj_files in os.walk(search_path):
for obj_file in obj_files:
if obj_file.endswith(".o"):
module_name, object_name = self.path_object_to_module_name(
os.path.join(root, obj_file))
if object_name in self.object_to_module:
if DEBUG:
print "WARNING: multiple usages of object file: %s"\
% object_name
print " Current: %s" % \
self.object_to_module[object_name]
print " New: %s" % module_name
print " "
else:
self.object_to_module.update({object_name:module_name})
export_formats = ["json", "csv-ci", "table"]
def generate_output(self, export_format, file_output=None):
""" Generates summary of memory map data
Positional arguments:
export_format - the format to dump
Keyword arguments:
file_desc - descriptor (either stdout or file)
"""
try:
if file_output:
file_desc = open(file_output, 'wb')
else:
file_desc = sys.stdout
except IOError as error:
print "I/O error({0}): {1}".format(error.errno, error.strerror)
return False
subtotal = dict()
for k in self.sections:
subtotal[k] = 0
# Calculate misc flash sections
misc_flash_mem = 0
for i in self.modules:
for k in self.misc_flash_sections:
if self.modules[i][k]:
misc_flash_mem += self.modules[i][k]
json_obj = []
for i in sorted(self.modules):
            for k in self.sections:
                subtotal[k] += self.modules[i][k]
json_obj.append({
"module":i,
"size":{
k:self.modules[i][k] for k in self.print_sections
}
})
summary = {
'summary':{
'static_ram': (subtotal['.data'] + subtotal['.bss']),
'heap': (subtotal['.heap']),
'stack': (subtotal['.stack']),
'total_ram': (subtotal['.data'] + subtotal['.bss'] +
subtotal['.heap']+subtotal['.stack']),
'total_flash': (subtotal['.text'] + subtotal['.data'] +
misc_flash_mem),
}
}
self.mem_summary = json_obj + [summary]
to_call = {'json': self.generate_json,
'csv-ci': self.generate_csv,
'table': self.generate_table}[export_format]
to_call(subtotal, misc_flash_mem, file_desc)
if file_desc is not sys.stdout:
file_desc.close()
def generate_json(self, _, dummy, file_desc):
"""Generate a json file from a memory map
Positional arguments:
subtotal - total sizes for each module
misc_flash_mem - size of misc flash sections
file_desc - the file to write out the final report to
"""
file_desc.write(json.dumps(self.mem_summary, indent=4))
file_desc.write('\n')
def generate_csv(self, subtotal, misc_flash_mem, file_desc):
"""Generate a CSV file from a memoy map
Positional arguments:
subtotal - total sizes for each module
misc_flash_mem - size of misc flash sections
file_desc - the file to write out the final report to
"""
csv_writer = csv.writer(file_desc, delimiter=',',
quoting=csv.QUOTE_NONE)
csv_module_section = []
csv_sizes = []
for i in sorted(self.modules):
for k in self.print_sections:
csv_module_section += [i+k]
csv_sizes += [self.modules[i][k]]
csv_module_section += ['static_ram']
csv_sizes += [subtotal['.data']+subtotal['.bss']]
csv_module_section += ['heap']
if subtotal['.heap'] == 0:
csv_sizes += ['unknown']
else:
csv_sizes += [subtotal['.heap']]
csv_module_section += ['stack']
if subtotal['.stack'] == 0:
csv_sizes += ['unknown']
else:
csv_sizes += [subtotal['.stack']]
csv_module_section += ['total_ram']
csv_sizes += [subtotal['.data'] + subtotal['.bss'] +
subtotal['.heap'] + subtotal['.stack']]
csv_module_section += ['total_flash']
csv_sizes += [subtotal['.text']+subtotal['.data']+misc_flash_mem]
csv_writer.writerow(csv_module_section)
csv_writer.writerow(csv_sizes)
def generate_table(self, subtotal, misc_flash_mem, file_desc):
"""Generate a table from a memoy map
Positional arguments:
subtotal - total sizes for each module
misc_flash_mem - size of misc flash sections
file_desc - the file to write out the final report to
"""
# Create table
columns = ['Module']
columns.extend(self.print_sections)
table = PrettyTable(columns)
table.align["Module"] = "l"
for col in self.print_sections:
table.align[col] = 'r'
for i in sorted(self.modules):
row = [i]
for k in self.print_sections:
row.append(self.modules[i][k])
table.add_row(row)
subtotal_row = ['Subtotals']
for k in self.print_sections:
subtotal_row.append(subtotal[k])
table.add_row(subtotal_row)
file_desc.write(table.get_string())
file_desc.write('\n')
if subtotal['.heap'] == 0:
file_desc.write("Allocated Heap: unknown\n")
else:
file_desc.write("Allocated Heap: %s bytes\n" %
str(subtotal['.heap']))
if subtotal['.stack'] == 0:
file_desc.write("Allocated Stack: unknown\n")
else:
file_desc.write("Allocated Stack: %s bytes\n" %
str(subtotal['.stack']))
file_desc.write("Total Static RAM memory (data + bss): %s bytes\n" %
(str(subtotal['.data'] + subtotal['.bss'])))
file_desc.write(
"Total RAM memory (data + bss + heap + stack): %s bytes\n"
% (str(subtotal['.data'] + subtotal['.bss'] + subtotal['.heap'] +
subtotal['.stack'])))
file_desc.write("Total Flash memory (text + data + misc): %s bytes\n" %
(str(subtotal['.text'] + subtotal['.data'] +
misc_flash_mem)))
toolchains = ["ARM", "ARM_STD", "ARM_MICRO", "GCC_ARM", "IAR"]
def parse(self, mapfile, toolchain):
""" Parse and decode map file depending on the toolchain
Positional arguments:
mapfile - the file name of the memory map file
toolchain - the toolchain used to create the file
"""
result = True
try:
with open(mapfile, 'r') as file_input:
if toolchain == "ARM" or toolchain == "ARM_STD" or\
toolchain == "ARM_MICRO":
self.search_objects(os.path.abspath(mapfile), "ARM")
self.parse_map_file_armcc(file_input)
elif toolchain == "GCC_ARM":
self.parse_map_file_gcc(file_input)
elif toolchain == "IAR":
self.search_objects(os.path.abspath(mapfile), toolchain)
self.parse_map_file_iar(file_input)
else:
result = False
except IOError as error:
print "I/O error({0}): {1}".format(error.errno, error.strerror)
result = False
return result
def main():
"""Entry Point"""
version = '0.3.11'
# Parser handling
parser = argparse.ArgumentParser(
description="Memory Map File Analyser for ARM mbed\nversion %s" %
version)
parser.add_argument(
'file', type=argparse_filestring_type, help='memory map file')
parser.add_argument(
'-t', '--toolchain', dest='toolchain',
help='select a toolchain used to build the memory map file (%s)' %
", ".join(MemapParser.toolchains),
required=True,
type=argparse_uppercase_type(MemapParser.toolchains, "toolchain"))
parser.add_argument(
'-o', '--output', help='output file name', required=False)
parser.add_argument(
'-e', '--export', dest='export', required=False, default='table',
type=argparse_lowercase_hyphen_type(MemapParser.export_formats,
'export format'),
help="export format (examples: %s: default)" %
", ".join(MemapParser.export_formats))
parser.add_argument('-v', '--version', action='version', version=version)
# Parse/run command
if len(sys.argv) <= 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
# Create memap object
memap = MemapParser()
# Parse and decode a map file
if args.file and args.toolchain:
if memap.parse(args.file, args.toolchain) is False:
sys.exit(0)
# Write output in file
if args.output != None:
memap.generate_output(args.export, args.output)
else: # Write output in screen
memap.generate_output(args.export)
sys.exit(0)
if __name__ == "__main__":
main()
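# Example invocations (the script and map-file names are illustrative):
#
#     python memap.py ./build/K64F/GCC_ARM/project.map -t GCC_ARM -e table
#     python memap.py ./build/K64F/ARM/project.map -t ARM -e csv-ci -o memap.csv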
|
#!/usr/bin/env python3
"""
1. Capture WeChat messages
:author Wang Weiwei <email>weiwei02@vip.qq.com / weiwei.wang@100credit.com</email>
:since 2017/8/11
:version 1.0
"""
import itchat,time
import queue
import _thread
XIAOBING_ID = 'xiaoice-ms'
msgQueue = queue.Queue(maxsize=100)
@itchat.msg_register(itchat.content.TEXT, isMpChat=True)
def print_content(msg):
if msg["FromUserName"] == XIAOBING_ID:
msgQueue.put(msg["Text"])
print("公众号消息", msg["Text"])
@itchat.msg_register(itchat.content.TEXT, isFriendChat=True)
def print_contents(msg):
print(msg)
itchat.send_msg(msg["Text"], toUserName="@3c0f48b3cec6e9d90fe03a8a0edb78eb")
return msgQueue.get()
itchat.auto_login(hotReload=True)
itchat.start_receiving()
# mps = itchat.get_mps()
#
# a = itchat.send_msg("Who are you", toUserName="@3c0f48b3cec6e9d90fe03a8a0edb78eb")
#
# message = itchat.get_msg()
# print("回复信息: ", message)
_thread.start_new_thread(itchat.run, ())
|
""" isort:skip_file """
import pickle
import pytest
dask = pytest.importorskip("dask") # isort:skip
distributed = pytest.importorskip("distributed") # isort:skip
from dask.distributed import Client, Lock
from distributed.utils_test import cluster, gen_cluster
from distributed.utils_test import loop
from distributed.client import futures_of
import xarray as xr
from xarray.backends.locks import HDF5_LOCK, CombinedLock
from xarray.tests.test_backends import (
ON_WINDOWS,
create_tmp_file,
create_tmp_geotiff,
open_example_dataset,
)
from xarray.tests.test_dataset import create_test_data
from . import (
assert_allclose,
has_h5netcdf,
has_netCDF4,
requires_rasterio,
has_scipy,
requires_zarr,
requires_cfgrib,
)
# this is to stop isort throwing errors. May have been easier to just use
# `isort:skip` in retrospect
da = pytest.importorskip("dask.array")
loop = loop # loop is an imported fixture, which flake8 has issues ack-ing
@pytest.fixture
def tmp_netcdf_filename(tmpdir):
return str(tmpdir.join("testfile.nc"))
ENGINES = []
if has_scipy:
ENGINES.append("scipy")
if has_netCDF4:
ENGINES.append("netcdf4")
if has_h5netcdf:
ENGINES.append("h5netcdf")
NC_FORMATS = {
"netcdf4": [
"NETCDF3_CLASSIC",
"NETCDF3_64BIT_OFFSET",
"NETCDF3_64BIT_DATA",
"NETCDF4_CLASSIC",
"NETCDF4",
],
"scipy": ["NETCDF3_CLASSIC", "NETCDF3_64BIT"],
"h5netcdf": ["NETCDF4"],
}
ENGINES_AND_FORMATS = [
("netcdf4", "NETCDF3_CLASSIC"),
("netcdf4", "NETCDF4_CLASSIC"),
("netcdf4", "NETCDF4"),
("h5netcdf", "NETCDF4"),
("scipy", "NETCDF3_64BIT"),
]
@pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS)
def test_dask_distributed_netcdf_roundtrip(
loop, tmp_netcdf_filename, engine, nc_format
):
if engine not in ENGINES:
pytest.skip("engine not available")
chunks = {"dim1": 4, "dim2": 3, "dim3": 6}
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop):
original = create_test_data().chunk(chunks)
if engine == "scipy":
with pytest.raises(NotImplementedError):
original.to_netcdf(
tmp_netcdf_filename, engine=engine, format=nc_format
)
return
original.to_netcdf(tmp_netcdf_filename, engine=engine, format=nc_format)
with xr.open_dataset(
tmp_netcdf_filename, chunks=chunks, engine=engine
) as restored:
assert isinstance(restored.var1.data, da.Array)
computed = restored.compute()
assert_allclose(original, computed)
@pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS)
def test_dask_distributed_read_netcdf_integration_test(
loop, tmp_netcdf_filename, engine, nc_format
):
if engine not in ENGINES:
pytest.skip("engine not available")
chunks = {"dim1": 4, "dim2": 3, "dim3": 6}
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop):
original = create_test_data()
original.to_netcdf(tmp_netcdf_filename, engine=engine, format=nc_format)
with xr.open_dataset(
tmp_netcdf_filename, chunks=chunks, engine=engine
) as restored:
assert isinstance(restored.var1.data, da.Array)
computed = restored.compute()
assert_allclose(original, computed)
@requires_zarr
@pytest.mark.parametrize("consolidated", [True, False])
@pytest.mark.parametrize("compute", [True, False])
def test_dask_distributed_zarr_integration_test(loop, consolidated, compute) -> None:
if consolidated:
pytest.importorskip("zarr", minversion="2.2.1.dev2")
write_kwargs = {"consolidated": True}
read_kwargs = {"backend_kwargs": {"consolidated": True}}
else:
write_kwargs = read_kwargs = {} # type: ignore
chunks = {"dim1": 4, "dim2": 3, "dim3": 5}
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop):
original = create_test_data().chunk(chunks)
with create_tmp_file(
allow_cleanup_failure=ON_WINDOWS, suffix=".zarrc"
) as filename:
maybe_futures = original.to_zarr(
filename, compute=compute, **write_kwargs
)
if not compute:
maybe_futures.compute()
with xr.open_dataset(
filename, chunks="auto", engine="zarr", **read_kwargs
) as restored:
assert isinstance(restored.var1.data, da.Array)
computed = restored.compute()
assert_allclose(original, computed)
@requires_rasterio
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
def test_dask_distributed_rasterio_integration_test(loop) -> None:
with create_tmp_geotiff() as (tmp_file, expected):
with cluster() as (s, [a, b]):
with pytest.warns(DeprecationWarning), Client(s["address"], loop=loop):
da_tiff = xr.open_rasterio(tmp_file, chunks={"band": 1})
assert isinstance(da_tiff.data, da.Array)
actual = da_tiff.compute()
assert_allclose(actual, expected)
@requires_cfgrib
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
def test_dask_distributed_cfgrib_integration_test(loop) -> None:
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop):
with open_example_dataset(
"example.grib", engine="cfgrib", chunks={"time": 1}
) as ds:
with open_example_dataset("example.grib", engine="cfgrib") as expected:
assert isinstance(ds["t"].data, da.Array)
actual = ds.compute()
assert_allclose(actual, expected)
@pytest.mark.xfail(reason="https://github.com/pydata/xarray/pull/6211")
@gen_cluster(client=True)
async def test_async(c, s, a, b) -> None:
x = create_test_data()
assert not dask.is_dask_collection(x)
y = x.chunk({"dim2": 4}) + 10
assert dask.is_dask_collection(y)
assert dask.is_dask_collection(y.var1)
assert dask.is_dask_collection(y.var2)
z = y.persist()
assert str(z)
assert dask.is_dask_collection(z)
assert dask.is_dask_collection(z.var1)
assert dask.is_dask_collection(z.var2)
assert len(y.__dask_graph__()) > len(z.__dask_graph__())
assert not futures_of(y)
assert futures_of(z)
future = c.compute(z)
w = await future
assert not dask.is_dask_collection(w)
assert_allclose(x + 10, w)
assert s.tasks
def test_hdf5_lock() -> None:
assert isinstance(HDF5_LOCK, dask.utils.SerializableLock)
@pytest.mark.xfail(reason="https://github.com/pydata/xarray/pull/6211")
@gen_cluster(client=True)
async def test_serializable_locks(c, s, a, b) -> None:
def f(x, lock=None):
with lock:
return x + 1
# note, the creation of Lock needs to be done inside a cluster
for lock in [
HDF5_LOCK,
Lock(),
Lock("filename.nc"),
CombinedLock([HDF5_LOCK]),
CombinedLock([HDF5_LOCK, Lock("filename.nc")]),
]:
futures = c.map(f, list(range(10)), lock=lock)
await c.gather(futures)
lock2 = pickle.loads(pickle.dumps(lock))
assert type(lock) == type(lock2)
|
#!/usr/bin/env python3
#
# LMS-AutoPlay
#
# Copyright (c) 2020 Craig Drummond <craig.p.drummond@gmail.com>
# MIT license.
#
import hashlib
import os
import re
import requests
import shutil
import sys
REPO_XML = "repo.xml"
PLUGIN_NAME = "VolumeCheck"
PLUGIN_GIT_NAME = "lms-volumecheck"
def info(s):
print("INFO: %s" %s)
def error(s):
print("ERROR: %s" % s)
exit(-1)
def usage():
print("Usage: %s <major>.<minor>.<patch>" % sys.argv[0])
exit(-1)
def checkVersion(version):
try:
parts=version.split('.')
major=int(parts[0])
minor=int(parts[1])
patch=int(parts[2])
    except (ValueError, IndexError):
error("Invalid version number")
def releaseUrl(version):
return "https://github.com/CDrummond/%s/releases/download/%s/%s-%s.zip" % (PLUGIN_GIT_NAME, version, PLUGIN_GIT_NAME, version)
def checkVersionExists(version):
url = releaseUrl(version)
info("Checking %s" % url)
request = requests.head(url)
if request.status_code == 200 or request.status_code == 302:
error("Version already exists")
def updateLine(line, startStr, endStr, updateStr):
start=line.find(startStr)
if start!=-1:
start+=len(startStr)
end=line.find(endStr, start)
if end!=-1:
return "%s%s%s" % (line[:start], updateStr, line[end:])
return None
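# For example, updateLine('<version>1.0.0</version>', '<version>', '</version>', '1.0.1')
# returns '<version>1.0.1</version>', and None when startStr is not present in the line.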
def updateInstallXml(version):
lines=[]
updated=False
installXml = "%s/install.xml" % PLUGIN_NAME
info("Updating %s" % installXml)
with open(installXml, "r") as f:
lines=f.readlines()
for i in range(len(lines)):
            new_line = updateLine(lines[i], "<version>", "</version>", version)
            if new_line:
                lines[i] = new_line
                updated = True
                break
if not updated:
error("Failed to update version in %s" % installXml)
with open(installXml, "w") as f:
for line in lines:
f.write(line)
def createZip(version):
info("Creating ZIP")
zipFile="%s-%s" % (PLUGIN_GIT_NAME, version)
shutil.make_archive(zipFile, 'zip', PLUGIN_NAME)
zipFile+=".zip"
return zipFile
def getSha1Sum(zipFile):
info("Generating SHA1")
sha1 = hashlib.sha1()
with open(zipFile, 'rb') as f:
while True:
data = f.read(65535)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
def updateRepoXml(repo, version, zipFile, sha1, pluginName=None):
lines=[]
updatedVersion=False
updatedUrl=False
updatedSha=False
info("Updating %s" % repo)
inSection = pluginName is None
with open(repo, "r") as f:
lines=f.readlines()
for i in range(len(lines)):
if pluginName is not None and '<plugin name="' in lines[i]:
inSection = pluginName in lines[i]
if inSection:
updated = updateLine(lines[i], 'version="', '"', version)
if updated:
lines[i]=updated
updatedVersion=True
updated = updateLine(lines[i], '<url>', '</url>', releaseUrl(version))
if updated:
lines[i]=updated
updatedUrl=True
updated = updateLine(lines[i], '<sha>', '</sha>', sha1)
if updated:
lines[i]=updated
updatedSha=True
if updatedVersion and updatedUrl and updatedSha:
break
if not updatedVersion:
error("Failed to update version in %s" % repo)
if not updatedUrl:
error("Failed to url version in %s" % repo)
if not updatedSha:
error("Failed to sha version in %s" % repo)
with open(repo, "w") as f:
for line in lines:
f.write(line)
if 1==len(sys.argv):
usage()
version=sys.argv[1]
if version!="test":
checkVersion(version)
checkVersionExists(version)
updateInstallXml(version)
zipFile = createZip(version)
sha1 = getSha1Sum(zipFile)
if version!="test" and os.path.exists(REPO_XML):
updateRepoXml(REPO_XML, version, zipFile, sha1, PLUGIN_NAME)
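# Example invocation (the script filename is illustrative):
#
#     ./mkrel.py 1.2.3
#
# This checks that release 1.2.3 does not already exist, updates VolumeCheck/install.xml,
# creates lms-volumecheck-1.2.3.zip and, if repo.xml is present, updates its version, url
# and sha entries.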
|
import json
import os
from base64 import b64decode
from datetime import timedelta
from threading import Semaphore
import github
from dotmap import DotMap
from github import GithubException
from conan_inquiry.transformers.base import BaseGithubTransformer
from conan_inquiry.util.general import render_readme
from conan_inquiry.util.travis import repo_has_travis
class GithubTransformer(BaseGithubTransformer):
"""
Populates empty urls based on the Github url, if given
"""
github_limit = Semaphore(value=15)
def transform(self, package):
if 'github' in package.urls:
with self.github_limit:
github_id = package.urls.github.replace('.git', '')
try:
self._set_repo(github_id)
v3data = self.cache.get(github_id, timedelta(days=2), 'github_api',
lambda: self._v3_requests(),
locked_getter=False)
num_contributors = v3data['num_contributors']
latest_commit = v3data['latest_commit']
clone_url = v3data['clone_url']
repo_owner = v3data['repo_owner']
repo_name = v3data['repo_name']
graph_request = '''
query Repo($owner:String!, $name:String!) {
repo: repository(owner: $owner, name: $name) {
owner {
login
... on Organization {
name
orgEmail: email
websiteUrl
}
... on User {
name
userEmail: email
websiteUrl
}
}
tree: object(expression: "HEAD:") {
... on Tree {
entries {
name
}
}
}
repositoryTopics(first: 20) {
totalCount
nodes {
topic {
name
}
}
}
forks {
totalCount
}
description
hasIssuesEnabled
hasWikiEnabled
homepageUrl
url
openIssues: issues(states: OPEN) {
totalCount
}
closedIssues: issues(states: CLOSED) {
totalCount
}
openPRs: pullRequests(states: OPEN) {
totalCount
}
closedPRs: pullRequests(states: CLOSED) {
totalCount
}
pushedAt
stargazers {
totalCount
}
watchers {
totalCount
}
}
rateLimit {
cost
remaining
}
}
'''
graph_data = self.cache.get(github_id, timedelta(days=2), 'github_graph',
lambda: self.github_graph.execute(
graph_request,
dict(owner=repo_owner,
name=repo_name)),
locked_getter=False)
graph = json.loads(graph_data)['data']
except GithubException:
return package
if graph['repo']['description'] != package.name:
self._set_unless_exists(package, 'description', graph['repo']['description'])
self._set_unless_exists(package.urls, 'website', graph['repo']['homepageUrl'])
self._set_unless_exists(package.urls, 'website', 'https://github.com/' + github_id)
self._set_unless_exists(package.urls, 'code', graph['repo']['url'])
if graph['repo']['hasIssuesEnabled']:
self._set_unless_exists(package.urls, 'issues', graph['repo']['url'] + '/issues')
if graph['repo']['hasWikiEnabled']:
# TODO: check if there is content in the wiki
self._set_unless_exists(package.urls, 'wiki', graph['repo']['url'] + '/wiki')
if repo_has_travis(github_id, self.http):
self._set_unless_exists(package.urls, 'travis',
'https://travis-ci.org/' + github_id)
self._set_unless_exists(package.urls, 'git', clone_url)
try:
def get_readme(repo, github):
readme = repo.get_readme()
rendered = render_readme(readme.path, readme.decoded_content.decode('utf-8'),
graph['repo']['url'],
lambda raw: github.render_markdown(raw, repo).decode('utf-8'))
return dict(url=readme.html_url, content=rendered)
readme = self.cache.get(github_id, timedelta(days=7), 'github_readme',
lambda: get_readme(self.repo, self.github),
locked_getter=False)
self._set_unless_exists(package.urls, 'readme', readme['url'])
self._set_unless_exists(package.files.readme, 'url', readme['url'])
self._set_unless_exists(package.files.readme, 'content', readme['content'])
except github.UnknownObjectException:
pass
for entry in graph['repo']['tree']['entries']:
if os.path.basename(entry['name']).lower() == 'license':
def get_file(repo, name):
f = repo.get_file_contents(name)
return dict(url=f.html_url, string=str(b64decode(f.content)))
file = self.cache.get(github_id + '->' + entry['name'], timedelta(days=28), 'github_file',
lambda: get_file(self.repo, entry['name']),
locked_getter=False)
self._set_unless_exists(package, 'license', file['url'])
self._set_unless_exists(package, '_license_data', file['string'])
break
if 'authors' not in package:
owner = graph['repo']['owner']
if 'userEmail' in owner:
                        # user-owned (personal) repo
name = owner['name'] if owner['name'] is not None else owner['login']
author = DotMap(name=name,
github=owner['login'])
email = owner['userEmail']
website = owner['websiteUrl']
if email is not None:
author.email = email
if website is not None:
author.website = website
package.authors = [author]
else:
# organization repo
name = owner['name'] if owner['name'] is not None else owner['login']
author = DotMap(name=name,
github=owner['login'])
email = owner['orgEmail']
website = owner['websiteUrl']
if email is not None:
author.email = email
if website is not None:
author.website = website
package.authors = [author]
self._set_unless_exists(package.stats, 'github_prs', graph['repo']['openPRs']['totalCount'])
self._set_unless_exists(package.stats, 'github_issues', graph['repo']['openIssues']['totalCount'])
self._set_unless_exists(package.stats, 'github_stars', graph['repo']['stargazers']['totalCount'])
self._set_unless_exists(package.stats, 'github_watchers', graph['repo']['watchers']['totalCount'])
self._set_unless_exists(package.stats, 'github_forks', graph['repo']['forks']['totalCount'])
if num_contributors is not None:
self._set_unless_exists(package.stats, 'github_commits', num_contributors)
self._set_unless_exists(package.stats, 'github_latest_commit', latest_commit)
if 'keywords' not in package:
package.keywords = []
package.keywords.extend([r['topic']['name'] for r in graph['repo']['repositoryTopics']['nodes']])
for recipie in package.recipies:
if 'github' in recipie.urls:
self._set_unless_exists(recipie.urls, 'website', 'https://github.com/' + recipie.urls.github)
self._set_unless_exists(recipie.urls, 'issues', 'https://github.com/' + recipie.urls.github + '/issues')
return package
def _v3_requests(self):
# TODO: the number of commits does not seem to be correct and sometimes fetching doesn't work at all
contributors = self.repo.get_stats_contributors()
if contributors is not None:
num_contributors = sum([c.total for c in contributors])
else:
num_contributors = None
commits = self.repo.get_commits()
latest_commit = commits[0].commit.committer.date.isoformat()
clone_url = self.repo.clone_url
repo_owner = self.repo.owner.login
repo_name = self.repo.name
return dict(num_contributors=num_contributors, latest_commit=latest_commit,
clone_url=clone_url, repo_owner=repo_owner, repo_name=repo_name)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .configuration_store import *
from .get_configuration_store import *
from .list_configuration_store_key_value import *
from .list_configuration_store_keys import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-nextgen:appconfiguration/v20191001:ConfigurationStore":
return ConfigurationStore(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-nextgen", "appconfiguration/v20191001", _module_instance)
_register_module()
|
"""inventory URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from .views import dashboard
urlpatterns = [
path('admin/', admin.site.urls),
path('', dashboard, name='dashboard'),
path('users/', include('users.urls')),
path('store/', include('store.urls')),
]
|
import higher
from leap import Leap
import numpy as np
import os
import torch
import torch.nn as nn
import gc
def train(model, source_corpus, char2idx, args, device):
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=args.lr_decay, patience=args.patience,
threshold=args.threshold)
best_valid_cosine = 1
for epoch in np.arange(args.n_epochs):
valid_cosine = []
valid_ce = []
model.train()
for batch in np.arange(args.n_batch):
train_contexts, train_targets, train_vocabs, train_inds = source_corpus.get_batch(args.batch_size,
args.n_shot,
char2idx, device,
fixed=args.fixed_shot,
return_inds=True)
optimizer.zero_grad()
if args.lang_model:
pred_emb, pred_ind = model.forward(train_contexts, train_vocabs, lang_model=args.lang_model)
loss = nn.functional.cross_entropy(pred_ind, train_inds)
loss += -nn.functional.cosine_similarity(pred_emb, train_targets).mean()
else:
pred_emb = model.forward(train_contexts, train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, train_targets).mean()
loss.backward()
optimizer.step()
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
valid_contexts, valid_targets, valid_vocabs, valid_inds = source_corpus.get_batch(args.batch_size,
args.n_shot,
char2idx, device,
use_valid=True,
fixed=args.fixed_shot,
return_inds=True)
if args.lang_model:
pred_emb, pred_ind = model.forward(valid_contexts, valid_vocabs, lang_model=args.lang_model)
loss = nn.functional.cross_entropy(pred_ind, valid_inds).mean()
valid_ce += [loss.cpu().numpy()]
else:
pred_emb = model.forward(valid_contexts, valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, valid_targets).mean()
valid_cosine += [loss.cpu().numpy()]
avg_valid = np.average(valid_cosine)
lr_scheduler.step(avg_valid)
if args.lang_model:
avg_ce = np.average(valid_ce)
print(f"Average cosine loss: {avg_valid}; Average cross entropy loss: {avg_ce}")
else:
print(f"Average cosine loss: {avg_valid}")
if avg_valid < best_valid_cosine:
best_valid_cosine = avg_valid
torch.save(model.state_dict(), os.path.join(args.save_dir, 'model.pt'))
if optimizer.param_groups[0]['lr'] < args.lr_early_stop:
print('LR early stop')
break
def maml_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):
model = model.to(device)
meta_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_meta_lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,
patience=args.patience, threshold=args.threshold)
best_score = 3
for meta_epoch in np.arange(args.n_meta_epochs):
gc.collect()
source_valid_cosine = []
target_valid_cosine = []
model.train()
with torch.backends.cudnn.flags(benchmark=True):
for meta_batch in np.arange(args.n_meta_batch):
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.maml_inner_lr_init)
meta_optimizer.zero_grad()
with higher.innerloop_ctx(model, inner_optimizer, copy_initial_weights=False) as (fmodel, diffopt):
for inner_batch in np.arange(args.n_inner_batch):
source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)
pred_emb = fmodel.forward(source_train_contexts, source_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()
diffopt.step(loss)
target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = fmodel.forward(target_train_contexts, target_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()
loss.backward()
meta_optimizer.step()
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)
pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()
source_valid_cosine += [loss.cpu().numpy()]
target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()
target_valid_cosine += [loss.cpu().numpy()]
avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)
score = avg_target_valid
lr_scheduler.step(score)
print(f"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}")
if score < best_score:
best_score = score
torch.save(model.state_dict(), os.path.join(args.save_dir, 'maml_model.pt'))
if meta_optimizer.param_groups[0]['lr'] < args.maml_lr_early_stop:
print('LR early stop')
break
def leap_adapt(model, source_corpus, target_corpus, char2idx, args, device, lang_model_n_words=0):
model = model.to(device)
leap = Leap(model)
meta_optimizer = torch.optim.Adam(leap.parameters(), lr=args.leap_meta_lr_init)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(meta_optimizer, factor=args.lr_decay,
patience=args.patience, threshold=args.threshold)
best_score = 3
for meta_epoch in np.arange(args.n_meta_epochs):
source_valid_cosine = []
target_valid_cosine = []
model.train()
for meta_batch in np.arange(args.n_meta_batch):
meta_optimizer.zero_grad()
leap.init_task()
leap.to(model)
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)
for inner_batch in np.arange(args.n_task_steps):
inner_optimizer.zero_grad()
source_train_contexts, source_train_targets, source_train_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot)
pred_emb = model.forward(source_train_contexts, source_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_train_targets).mean()
loss.backward()
leap.update(loss, model)
inner_optimizer.step()
leap.init_task()
leap.to(model)
inner_optimizer = torch.optim.Adam(model.parameters(), lr=args.leap_inner_lr_init)
for inner_batch in np.arange(args.n_task_steps):
inner_optimizer.zero_grad()
target_train_contexts, target_train_targets, target_train_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_train_contexts, target_train_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_train_targets).mean()
loss.backward()
leap.update(loss, model)
inner_optimizer.step()
leap.normalize()
meta_optimizer.step()
leap.to(model)
model.eval()
with torch.no_grad():
for batch in np.arange(args.n_batch):
source_valid_contexts, source_valid_targets, source_valid_vocabs = source_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot)
pred_emb = model.forward(source_valid_contexts, source_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, source_valid_targets).mean()
source_valid_cosine += [loss.cpu().numpy()]
target_valid_contexts, target_valid_targets, target_valid_vocabs = target_corpus.get_batch(
args.meta_batch_size, args.n_shot, char2idx, device, use_valid=True, fixed=args.fixed_shot,
repeat_ctxs=args.meta_repeat_ctxs)
pred_emb = model.forward(target_valid_contexts, target_valid_vocabs)
loss = -nn.functional.cosine_similarity(pred_emb, target_valid_targets).mean()
target_valid_cosine += [loss.cpu().numpy()]
avg_source_valid, avg_target_valid = np.average(source_valid_cosine), np.average(target_valid_cosine)
score = avg_target_valid
lr_scheduler.step(score)
print(f"Average source cosine loss: {avg_source_valid}; Average target cosine loss: {avg_target_valid}")
if score < best_score:
best_score = score
torch.save(model.state_dict(), os.path.join(args.save_dir, 'leap_model.pt'))
if meta_optimizer.param_groups[0]['lr'] < args.leap_lr_early_stop:
print('LR early stop')
break
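# Minimal invocation sketch for train() (hedged: the model, corpus and char2idx objects
# as well as the hyper-parameter values below are assumptions, not defined in this module):
#
#     from argparse import Namespace
#
#     args = Namespace(lr_init=1e-3, lr_decay=0.5, patience=2, threshold=1e-3,
#                      n_epochs=10, n_batch=100, batch_size=32, n_shot=4,
#                      fixed_shot=True, lang_model=False, save_dir='checkpoints',
#                      lr_early_stop=1e-5)
#     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#     train(model, source_corpus, char2idx, args, device)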
|
import asyncio
import collections
import contextlib
import functools
from typing import (
Any,
DefaultDict,
Dict,
List,
Sequence,
Set,
Tuple,
Type,
TYPE_CHECKING,
Union,
)
from async_service import Service
from async_service.asyncio import cleanup_tasks
from cached_property import cached_property
from eth_keys import keys
from p2p.abc import (
BehaviorAPI,
CommandAPI,
ConnectionAPI,
HandlerFn,
HandshakeReceiptAPI,
LogicAPI,
MultiplexerAPI,
NodeAPI,
ProtocolAPI,
SessionAPI,
SubscriptionAPI,
THandshakeReceipt,
TLogic,
TProtocol,
)
from p2p.constants import PEER_READY_TIMEOUT
from p2p.disconnect import DisconnectReason
from p2p.exceptions import (
DuplicateAPI,
MalformedMessage,
PeerConnectionLost,
ReceiptNotFound,
UnknownAPI,
UnknownProtocol,
UnknownProtocolCommand,
)
from p2p.asyncio_utils import create_task, wait_first
from p2p.subscription import Subscription
from p2p.p2p_proto import BaseP2PProtocol, DevP2PReceipt, Disconnect
from p2p.typing import Capabilities
from p2p._utils import get_logger
if TYPE_CHECKING:
from p2p.peer import BasePeer # noqa: F401
class Connection(ConnectionAPI, Service):
_protocol_handlers: DefaultDict[
Type[ProtocolAPI],
Set[HandlerFn]
]
_msg_handlers: Set[HandlerFn]
_command_handlers: DefaultDict[
Type[CommandAPI[Any]],
Set[HandlerFn]
]
_logics: Dict[str, LogicAPI]
def __init__(self,
multiplexer: MultiplexerAPI,
devp2p_receipt: DevP2PReceipt,
protocol_receipts: Sequence[HandshakeReceiptAPI],
is_dial_out: bool) -> None:
self.logger = get_logger('p2p.connection.Connection')
# The multiplexer passed to us will have been started when performing the handshake, so it
# is already reading messages from the transport and storing them in per-protocol queues.
self._multiplexer = multiplexer
# Stop early in case the multiplexer is no longer streaming.
self._multiplexer.raise_if_streaming_error()
self._devp2p_receipt = devp2p_receipt
self.protocol_receipts = tuple(protocol_receipts)
self.is_dial_out = is_dial_out
self._protocol_handlers = collections.defaultdict(set)
self._command_handlers = collections.defaultdict(set)
self._msg_handlers = set()
# An event that controls when the connection will start reading from
# the individual multiplexed protocol streams and feeding handlers.
# This ensures that the connection does not start consuming messages
# before all necessary handlers have been added
self._handlers_ready = asyncio.Event()
self.behaviors_applied = asyncio.Event()
self._logics = {}
def __str__(self) -> str:
return f"Connection-{self.session}"
def __repr__(self) -> str:
return f"<Connection {self.session!r} {self._multiplexer!r} dial_out={self.is_dial_out}>"
@property
def is_streaming_messages(self) -> bool:
return self._handlers_ready.is_set()
def start_protocol_streams(self) -> None:
self._handlers_ready.set()
async def run_behaviors(self, behaviors: Tuple[BehaviorAPI, ...]) -> None:
async with contextlib.AsyncExitStack() as stack:
futures: List[asyncio.Task[Any]] = [
create_task(self.manager.wait_finished(), 'Connection/run_behaviors/wait_finished')]
for behavior in behaviors:
if behavior.should_apply_to(self):
behavior_exit = await stack.enter_async_context(behavior.apply(self))
futures.append(behavior_exit)
self.behaviors_applied.set()
# If wait_first() is called, cleanup_tasks() will be a no-op, but if any post_apply()
# calls raise an exception, it will ensure we don't leak pending tasks that would
# cause asyncio to complain.
async with cleanup_tasks(*futures):
try:
for behavior in behaviors:
behavior.post_apply()
await wait_first(futures)
except PeerConnectionLost:
# Any of our behaviors may propagate a PeerConnectionLost, which is to be
# expected as many Connection APIs used by them can raise that. To avoid a
# DaemonTaskExit since we're returning silently, ensure we're cancelled.
self.manager.cancel()
async def run_peer(self, peer: 'BasePeer') -> None:
"""
Run the peer as a child service.
A peer must always run as a child of the connection so that it has an open connection
until it finishes its cleanup.
"""
self.manager.run_daemon_task(self.run_behaviors, peer.get_behaviors())
await self.behaviors_applied.wait()
self.manager.run_daemon_child_service(peer)
await asyncio.wait_for(peer.manager.wait_started(), timeout=PEER_READY_TIMEOUT)
await asyncio.wait_for(peer.ready.wait(), timeout=PEER_READY_TIMEOUT)
#
# Primary properties of the connection
#
@cached_property
def is_dial_in(self) -> bool:
return not self.is_dial_out
@cached_property
def remote(self) -> NodeAPI:
return self._multiplexer.remote
@cached_property
def session(self) -> SessionAPI:
return self._multiplexer.session
@property
def is_closing(self) -> bool:
return self._multiplexer.is_closing
def __del__(self) -> None:
# This is necessary because the multiplexer passed to our constructor will be streaming,
# and if for some reason our run() method is not called, we'd leave the multiplexer
        # streaming indefinitely. We might still get asyncio warnings (about a task being destroyed
# while still pending) if that happens, but this is the best we can do.
self._multiplexer.cancel_streaming()
async def run(self) -> None:
# Our multiplexer will already be streaming in the background (as it was used during
# handshake), so we do this to ensure we only start if it is still running.
self._multiplexer.raise_if_streaming_error()
for protocol in self._multiplexer.get_protocols():
self.manager.run_daemon_task(self._feed_protocol_handlers, protocol)
try:
await self._multiplexer.wait_streaming_finished()
except PeerConnectionLost:
pass
except MalformedMessage as err:
self.logger.debug(
"Disconnecting peer %s for sending MalformedMessage: %s",
self.remote,
err,
exc_info=True,
)
try:
self.get_base_protocol().send(Disconnect(DisconnectReason.BAD_PROTOCOL))
except PeerConnectionLost:
self.logger.debug(
"%s went away while trying to disconnect for MalformedMessage",
self,
)
finally:
self.manager.cancel()
#
# Subscriptions/Handler API
#
async def _feed_protocol_handlers(self, protocol: ProtocolAPI) -> None:
# do not start consuming from the protocol stream until
# `start_protocol_streams` has been called and the multiplexer is
# active.
try:
await asyncio.wait_for(self._handlers_ready.wait(), timeout=10)
except asyncio.TimeoutError as err:
            self.logger.warning('Timed out waiting for handler ready signal')
raise asyncio.TimeoutError(
"The handlers ready event was never set. Ensure that "
"`Connection.start_protocol_streams()` is being called"
) from err
async for cmd in self._multiplexer.stream_protocol_messages(protocol):
self.logger.debug2('Handling command: %s', type(cmd))
            # local copy to prevent mutation while iterating
protocol_handlers = set(self._protocol_handlers[type(protocol)])
for proto_handler_fn in protocol_handlers:
self.logger.debug2(
'Running protocol handler %s for protocol=%s command=%s',
proto_handler_fn,
protocol,
type(cmd),
)
self.manager.run_task(proto_handler_fn, self, cmd)
command_handlers = set(self._command_handlers[type(cmd)])
command_handlers.update(self._msg_handlers)
for cmd_handler_fn in command_handlers:
self.logger.debug2(
'Running command handler %s for protocol=%s command=%s',
cmd_handler_fn,
protocol,
type(cmd),
)
self.manager.run_task(cmd_handler_fn, self, cmd)
# XXX: This ugliness is needed because Multiplexer.stream_protocol_messages() stops as
# soon as the transport is closed, and that may happen immediately after we received a
# Disconnect+EOF from a remote, but before we've had a chance to process the disconnect,
# which would cause a DaemonTaskExit error
# (https://github.com/ethereum/trinity/issues/1733).
if self.is_closing:
try:
await asyncio.wait_for(self.manager.wait_finished(), timeout=2)
except asyncio.TimeoutError:
self.logger.error(
"stream_protocol_messages() terminated but Connection was never cancelled, "
"this will cause the Connection to crash with a DaemonTaskExit")
def add_protocol_handler(self,
protocol_class: Type[ProtocolAPI],
handler_fn: HandlerFn,
) -> SubscriptionAPI:
if not self._multiplexer.has_protocol(protocol_class):
raise UnknownProtocol(
f"Protocol {protocol_class} was not found in the connected "
f"protocols: {self._multiplexer.get_protocols()}"
)
self._protocol_handlers[protocol_class].add(handler_fn)
cancel_fn = functools.partial(
self._protocol_handlers[protocol_class].remove,
handler_fn,
)
return Subscription(cancel_fn)
def add_msg_handler(self, handler_fn: HandlerFn) -> SubscriptionAPI:
self._msg_handlers.add(handler_fn)
cancel_fn = functools.partial(self._msg_handlers.remove, handler_fn)
return Subscription(cancel_fn)
def add_command_handler(self,
command_type: Type[CommandAPI[Any]],
handler_fn: HandlerFn,
) -> SubscriptionAPI:
for protocol in self._multiplexer.get_protocols():
if protocol.supports_command(command_type):
self._command_handlers[command_type].add(handler_fn)
cancel_fn = functools.partial(
self._command_handlers[command_type].remove,
handler_fn,
)
return Subscription(cancel_fn)
else:
raise UnknownProtocolCommand(
f"Command {command_type} was not found in the connected "
f"protocols: {self._multiplexer.get_protocols()}"
)
#
# API extension
#
def add_logic(self, name: str, logic: LogicAPI) -> SubscriptionAPI:
if name in self._logics:
raise DuplicateAPI(
f"There is already an API registered under the name '{name}': "
f"{self._logics[name]}"
)
self._logics[name] = logic
cancel_fn = functools.partial(self.remove_logic, name)
return Subscription(cancel_fn)
def remove_logic(self, name: str) -> None:
self._logics.pop(name)
def has_logic(self, name: str) -> bool:
if self.is_closing:
# This is a safety net, really, as the Peer should never call this if it is no longer
# alive.
raise PeerConnectionLost("Cannot look up subprotocol when connection is closing")
return name in self._logics
def get_logic(self, name: str, logic_type: Type[TLogic]) -> TLogic:
if not self.has_logic(name):
raise UnknownAPI(f"No API registered for the name '{name}'")
logic = self._logics[name]
if isinstance(logic, logic_type):
return logic
else:
raise TypeError(
f"Wrong logic type. expected: {logic_type} got: {type(logic)}"
)
#
# Access to underlying Multiplexer
#
def get_multiplexer(self) -> MultiplexerAPI:
return self._multiplexer
#
# Base Protocol shortcuts
#
def get_base_protocol(self) -> BaseP2PProtocol:
return self._multiplexer.get_base_protocol()
def get_p2p_receipt(self) -> DevP2PReceipt:
return self._devp2p_receipt
#
# Protocol APIS
#
def has_protocol(self, protocol_identifier: Union[ProtocolAPI, Type[ProtocolAPI]]) -> bool:
return self._multiplexer.has_protocol(protocol_identifier)
def get_protocols(self) -> Tuple[ProtocolAPI, ...]:
return self._multiplexer.get_protocols()
def get_protocol_by_type(self, protocol_type: Type[TProtocol]) -> TProtocol:
return self._multiplexer.get_protocol_by_type(protocol_type)
def get_protocol_for_command_type(self, command_type: Type[CommandAPI[Any]]) -> ProtocolAPI:
return self._multiplexer.get_protocol_for_command_type(command_type)
def get_receipt_by_type(self, receipt_type: Type[THandshakeReceipt]) -> THandshakeReceipt:
for receipt in self.protocol_receipts:
if isinstance(receipt, receipt_type):
return receipt
else:
raise ReceiptNotFound(f"Receipt not found: {receipt_type}")
#
# Connection Metadata
#
@cached_property
def remote_capabilities(self) -> Capabilities:
return self._devp2p_receipt.capabilities
@cached_property
def remote_p2p_version(self) -> int:
return self._devp2p_receipt.version
@cached_property
def negotiated_p2p_version(self) -> int:
return self.get_base_protocol().version
@cached_property
def remote_public_key(self) -> keys.PublicKey:
return keys.PublicKey(self._devp2p_receipt.remote_public_key)
@cached_property
def client_version_string(self) -> str:
return self._devp2p_receipt.client_version_string
@cached_property
def safe_client_version_string(self) -> str:
# limit number of chars to be displayed, and try to keep printable ones only
# MAGIC 256: arbitrary, "should be enough for everybody"
if len(self.client_version_string) <= 256:
return self.client_version_string
truncated_client_version_string = self.client_version_string[:253] + '...'
if truncated_client_version_string.isprintable():
return truncated_client_version_string
else:
return repr(truncated_client_version_string)
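# Illustrative sketch (not part of the original module): how the subscription API
# above is typically used from calling code. `ExampleCommand` and `handle_example`
# are hypothetical names, and the cancel() call assumes SubscriptionAPI exposes it.
#
#     async def handle_example(connection, cmd):
#         ...  # react to the decoded command
#
#     subscription = connection.add_command_handler(ExampleCommand, handle_example)
#     ...
#     subscription.cancel()  # unsubscribes the handler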
|
a = input('Enter the number you want to reverse:')
b = a[::-1]
c = int(b)
print('the reversed number is', c)
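# Example run (illustrative): entering 120 prints "the reversed number is 21",
# because the reversed string "021" loses its leading zero when passed to int().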
|
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
import os
from datetime import timedelta
from django.core.files import File
from django.utils.crypto import get_random_string
from django.utils.timezone import now
from django.utils.translation import gettext_lazy
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from pretix.api.serializers.i18n import I18nAwareModelSerializer
from pretix.api.serializers.order import (
AnswerCreateSerializer, AnswerSerializer, InlineSeatSerializer,
)
from pretix.base.models import Quota, Seat
from pretix.base.models.orders import CartPosition
class CartPositionSerializer(I18nAwareModelSerializer):
answers = AnswerSerializer(many=True)
seat = InlineSeatSerializer()
class Meta:
model = CartPosition
fields = ('id', 'cart_id', 'item', 'variation', 'price', 'attendee_name', 'attendee_name_parts',
'attendee_email', 'voucher', 'addon_to', 'subevent', 'datetime', 'expires', 'includes_tax',
'answers', 'seat')
class CartPositionCreateSerializer(I18nAwareModelSerializer):
answers = AnswerCreateSerializer(many=True, required=False)
expires = serializers.DateTimeField(required=False)
attendee_name = serializers.CharField(required=False, allow_null=True)
seat = serializers.CharField(required=False, allow_null=True)
sales_channel = serializers.CharField(required=False, default='web')
class Meta:
model = CartPosition
fields = ('cart_id', 'item', 'variation', 'price', 'attendee_name', 'attendee_name_parts', 'attendee_email',
'subevent', 'expires', 'includes_tax', 'answers', 'seat', 'sales_channel')
def create(self, validated_data):
answers_data = validated_data.pop('answers', [])
if not validated_data.get('cart_id'):
cid = "{}@api".format(get_random_string(48))
while CartPosition.objects.filter(cart_id=cid).exists():
cid = "{}@api".format(get_random_string(48))
validated_data['cart_id'] = cid
if not validated_data.get('expires'):
validated_data['expires'] = now() + timedelta(
minutes=self.context['event'].settings.get('reservation_time', as_type=int)
)
new_quotas = (validated_data.get('variation').quotas.filter(subevent=validated_data.get('subevent'))
if validated_data.get('variation')
else validated_data.get('item').quotas.filter(subevent=validated_data.get('subevent')))
if len(new_quotas) == 0:
raise ValidationError(
gettext_lazy('The product "{}" is not assigned to a quota.').format(
str(validated_data.get('item'))
)
)
for quota in new_quotas:
avail = quota.availability(_cache=self.context['quota_cache'])
if avail[0] != Quota.AVAILABILITY_OK or (avail[1] is not None and avail[1] < 1):
raise ValidationError(
gettext_lazy('There is not enough quota available on quota "{}" to perform '
'the operation.').format(
quota.name
)
)
for quota in new_quotas:
oldsize = self.context['quota_cache'][quota.pk][1]
newsize = oldsize - 1 if oldsize is not None else None
self.context['quota_cache'][quota.pk] = (
Quota.AVAILABILITY_OK if newsize is None or newsize > 0 else Quota.AVAILABILITY_GONE,
newsize
)
attendee_name = validated_data.pop('attendee_name', '')
if attendee_name and not validated_data.get('attendee_name_parts'):
validated_data['attendee_name_parts'] = {
'_legacy': attendee_name
}
seated = validated_data.get('item').seat_category_mappings.filter(subevent=validated_data.get('subevent')).exists()
if validated_data.get('seat'):
if not seated:
raise ValidationError('The specified product does not allow to choose a seat.')
try:
seat = self.context['event'].seats.get(seat_guid=validated_data['seat'], subevent=validated_data.get('subevent'))
except Seat.DoesNotExist:
raise ValidationError('The specified seat does not exist.')
except Seat.MultipleObjectsReturned:
raise ValidationError('The specified seat ID is not unique.')
else:
validated_data['seat'] = seat
if not seat.is_available(
sales_channel=validated_data.get('sales_channel', 'web'),
distance_ignore_cart_id=validated_data['cart_id'],
):
raise ValidationError(gettext_lazy('The selected seat "{seat}" is not available.').format(seat=seat.name))
elif seated:
raise ValidationError('The specified product requires to choose a seat.')
validated_data.pop('sales_channel')
cp = CartPosition.objects.create(event=self.context['event'], **validated_data)
for answ_data in answers_data:
options = answ_data.pop('options')
if isinstance(answ_data['answer'], File):
an = answ_data.pop('answer')
answ = cp.answers.create(**answ_data, answer='')
answ.file.save(os.path.basename(an.name), an, save=False)
answ.answer = 'file://' + answ.file.name
answ.save()
an.close()
else:
answ = cp.answers.create(**answ_data)
answ.options.add(*options)
return cp
def validate_cart_id(self, cid):
if cid and not cid.endswith('@api'):
raise ValidationError('Cart ID should end in @api or be empty.')
return cid
def validate_item(self, item):
if item.event != self.context['event']:
raise ValidationError(
'The specified item does not belong to this event.'
)
if not item.active:
raise ValidationError(
'The specified item is not active.'
)
return item
def validate_subevent(self, subevent):
if self.context['event'].has_subevents:
if not subevent:
raise ValidationError(
'You need to set a subevent.'
)
if subevent.event != self.context['event']:
raise ValidationError(
'The specified subevent does not belong to this event.'
)
elif subevent:
raise ValidationError(
'You cannot set a subevent for this event.'
)
return subevent
def validate(self, data):
if data.get('item'):
if data.get('item').has_variations:
if not data.get('variation'):
raise ValidationError('You should specify a variation for this item.')
else:
if data.get('variation').item != data.get('item'):
raise ValidationError(
'The specified variation does not belong to the specified item.'
)
elif data.get('variation'):
raise ValidationError(
'You cannot specify a variation for this item.'
)
if data.get('attendee_name') and data.get('attendee_name_parts'):
raise ValidationError(
{'attendee_name': ['Do not specify attendee_name if you specified attendee_name_parts.']}
)
return data
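# Illustrative sketch (not part of pretix itself): the kind of JSON payload this
# serializer is meant to validate when creating a cart position via the API.
# All field values below are invented for demonstration only.
#
#     {
#         "item": 1,
#         "variation": null,
#         "price": "23.00",
#         "attendee_name": "Ada Lovelace",
#         "answers": [],
#         "sales_channel": "web"
#     }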
|
# Generated by Django 2.1 on 2018-09-07 13:48
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [("analytics", "0007_dependencyusage_version")]
operations = [
migrations.AddField(
model_name="dependency",
name="timestamp",
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name="dependencyusage",
name="timestamp",
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name="dependencyusage",
name="dependency",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="depusage",
to="analytics.Dependency",
),
),
migrations.AlterField(
model_name="dependencyusage",
name="major_version",
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name="dependencyusage",
name="minor_version",
field=models.BigIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name="dependencyusage",
name="patch_version",
field=models.BigIntegerField(blank=True, null=True),
),
]
|
import argparse
import sys
import pprint
from pygobo import OBOParser, query_generate
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='Article importer')
argparser.add_argument('--host',help='Redis host',default='0.0.0.0')
argparser.add_argument('--port',help='Redis port',type=int,default=6379)
argparser.add_argument('--password',help='Redis password')
argparser.add_argument('--show-query',help='Show the cypher queries before they are run.',action='store_true',default=False)
argparser.add_argument('--graph',help='The graph name',default='obo')
argparser.add_argument('--scope',help='The scope of the operation',choices=['all','ontology','term','xref','typedef'],action='append')
argparser.add_argument('--option',help='An option to the operation',choices=['show-xrefs'],action='append')
argparser.add_argument('operation',help='The operation to perform',choices=['parse','cypher','load','structure'])
argparser.add_argument('files',nargs='*',help='The files to process.')
args = argparser.parse_args()
pp = pprint.PrettyPrinter(indent=2)
if len(args.files)==0:
sources = [sys.stdin]
else:
sources = args.files
for source in sources:
parser = OBOParser()
with open(source, 'r') if isinstance(source, str) else source as source_file:
ontology = parser.parse(source_file)
if args.operation=='parse':
if args.scope is None or 'all' in args.scope:
args.scope = ['ontology','term','typedef']
if 'ontology' in args.scope:
print('Ontology:')
pp.pprint(ontology.metadata)
if 'term' in args.scope:
print('Terms:')
pp.pprint(ontology.terms)
if 'typedef' in args.scope:
print('Typedefs:')
pp.pprint(ontology.typedefs)
elif args.operation=='cypher':
if args.scope is None or 'all' in args.scope:
args.scope = ['ontology','term','xref','typedef']
for query in query_generate(ontology,scope=args.scope):
print(query)
print(';')
elif args.operation=='load':
import redis
from redisgraph import Graph
r = redis.Redis(host=args.host,port=args.port,password=args.password)
graph = Graph(args.graph,r)
if args.scope is None or 'all' in args.scope:
args.scope = ['ontology','term','xref','typedef']
for query in query_generate(ontology,scope=args.scope):
if args.show_query:
print(query)
print(';')
graph.query(query)
elif args.operation=='structure':
if args.scope is None or 'all' in args.scope:
args.scope = ['ontology','term','typedef']
if 'ontology' in args.scope:
print('Ontology:')
for name in sorted(ontology.metadata.keys()):
print(' '+name)
if name=='property_value':
for property in ontology.metadata['property_value'].keys():
print(' '+property)
elif name=='subsetdef':
for property in ontology.metadata['subsetdef'].keys():
print(' '+property)
if 'term' in args.scope:
print('Term:')
structure = {}
xrefs = {}
properties = {}
do_xrefs = ('show-xrefs' in args.option) if args.option is not None else False
for typedef in ontology.terms.keys():
term = ontology.terms[typedef]
if do_xrefs:
for name in term.get('xref',[]):
xrefs[name] = True
for name,value in term.get('property_value',[]):
properties[name] = True
for name in term.keys():
structure[name] = True
for name in sorted(structure.keys()):
print(' '+name)
if do_xrefs and name=='xref':
for xref in sorted(xrefs.keys()):
print(' '+xref)
if name=='property_value':
for property in sorted(properties.keys()):
print(' '+property)
if 'typedef' in args.scope:
print('Typedef:')
structure = {}
for typedef in ontology.typedefs.keys():
for name in ontology.typedefs[typedef].keys():
structure[name] = True
for name in sorted(structure.keys()):
print(' '+name)
|
import keras
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Dense, LSTM, Dropout
from keras.optimizers import Adam
import numpy as np
import random
from collections import deque
class Agent:
def __init__(self, state_size, is_eval=False, model_name=""):
self.state_size = state_size # normalized previous days
self.action_size = 2 # buy, sell
self.memory = deque(maxlen=1000)
self.inventory = []
self.net_worth = []
self.model_name = model_name
self.is_eval = is_eval
self.gamma = 0.95
self.epsilon = 1.0
self.epsilon_min = 0.08
self.epsilon_decay = 0.995
self.model = load_model("models/" + model_name) if is_eval else self._model()
def _model(self):
model = Sequential()
model.add(Dense(units=64, input_dim=self.state_size, activation="relu"))
model.add(Dense(units=32, activation="relu"))
model.add(Dense(units=8, activation="relu"))
model.add(Dense(self.action_size, activation="linear"))
model.compile(loss="mse", optimizer=Adam(lr=0.001))
return model
def act(self, state):
if not self.is_eval and random.random() <= self.epsilon:
return random.randrange(self.action_size)
options = self.model.predict(state)
return np.argmax(options[0])
def expReplay(self, batch_size):
mini_batch = []
l = len(self.memory)
for i in range(l - batch_size + 1, l):
mini_batch.append(self.memory[i])
for state, action, reward, next_state, done in mini_batch:
target = reward
if not done:
target = reward + self.gamma * np.amax(self.model.predict(next_state)[0])
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
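# Illustrative sketch (not part of the original module): a minimal loop that could
# drive this Agent. `prices`, `get_state` and `step` are hypothetical helpers that
# would have to be provided elsewhere.
#
#     agent = Agent(state_size=10)
#     for t in range(len(prices) - 1):
#         state = get_state(prices, t)
#         action = agent.act(state)  # 0 = buy, 1 = sell
#         reward, next_state, done = step(prices, t, action)
#         agent.memory.append((state, action, reward, next_state, done))
#         if len(agent.memory) > 32:
#             agent.expReplay(batch_size=32)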
|
from ..extensions import db
from flask_login import UserMixin as FlaskLoginUser
from uuid import uuid4
from damgard_jurik import keygen
class Authority(db.Model, FlaskLoginUser):
""" Implements an Authority class that can be accessed by flask-login and
handled by flask-sqlalchemy. Any human has a unique Authority object
for each election in which they are an authority. """
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text, nullable=False)
email = db.Column(db.Text, nullable=False)
email_confirmed = db.Column(db.Boolean, default=False, nullable=False)
email_key = db.Column(db.Text, unique=True, nullable=False)
election_id = db.Column(db.Integer, db.ForeignKey('election.id'),
nullable=False)
public_key = db.Column(db.PickleType, unique=True, nullable=False)
private_key_ring = db.Column(db.PickleType, nullable=False)
webauthn = db.Column(db.Boolean, nullable=False)
ukey = db.Column(db.String(20), unique=True, nullable=True)
credential_id = db.Column(db.String(250), unique=True, nullable=True)
pub_key = db.Column(db.String(65), unique=True, nullable=True)
sign_count = db.Column(db.Integer, default=0)
rp_id = db.Column(db.String(253), nullable=True)
icon_url = db.Column(db.String(2083), nullable=True)
pw_hash = db.Column(db.Text, nullable=True)
def __init__(self, **kwargs):
self.email_key = str(uuid4())
keypair = keygen(threshold=1, n_shares=1, n_bits=32)
self.public_key = keypair[0]
self.private_key_ring = keypair[1]
for key, value in kwargs.items():
setattr(self, key, value)
def get_id(self):
return self.id
def __repr__(self):
return f'<Authority {self.id} ({self.name})>'
|
expected = "\x1b[3m Rich features \x1b[0m\n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Colors \x1b[0m\x1b[1;31m \x1b[0m✓ \x1b[1;32m4-bit color\x1b[0m \x1b[38;2;51;0;0m█\x1b[0m\x1b[38;2;51;5;0m█\x1b[0m\x1b[38;2;51;11;0m█\x1b[0m\x1b[38;2;51;17;0m█\x1b[0m\x1b[38;2;51;23;0m█\x1b[0m\x1b[38;2;51;29;0m█\x1b[0m\x1b[38;2;51;35;0m█\x1b[0m\x1b[38;2;51;41;0m█\x1b[0m\x1b[38;2;51;47;0m█\x1b[0m\x1b[38;2;49;51;0m█\x1b[0m\x1b[38;2;43;51;0m█\x1b[0m\x1b[38;2;37;51;0m█\x1b[0m\x1b[38;2;31;51;0m█\x1b[0m\x1b[38;2;25;51;0m█\x1b[0m\x1b[38;2;19;51;0m█\x1b[0m\x1b[38;2;13;51;0m█\x1b[0m\x1b[38;2;7;51;0m█\x1b[0m\x1b[38;2;1;51;0m█\x1b[0m\x1b[38;2;0;51;3m█\x1b[0m\x1b[38;2;0;51;9m█\x1b[0m\x1b[38;2;0;51;15m█\x1b[0m\x1b[38;2;0;51;21m█\x1b[0m\x1b[38;2;0;51;27m█\x1b[0m\x1b[38;2;0;51;33m█\x1b[0m\x1b[38;2;0;51;39m█\x1b[0m\x1b[38;2;0;51;45m█\x1b[0m\x1b[38;2;0;50;51m█\x1b[0m\x1b[38;2;0;45;51m█\x1b[0m\x1b[38;2;0;39;51m█\x1b[0m\x1b[38;2;0;33;51m█\x1b[0m\x1b[38;2;0;27;51m█\x1b[0m\x1b[38;2;0;21;51m█\x1b[0m\x1b[38;2;0;15;51m█\x1b[0m\x1b[38;2;0;9;51m█\x1b[0m\x1b[38;2;0;3;51m█\x1b[0m\x1b[38;2;1;0;51m█\x1b[0m\x1b[38;2;7;0;51m█\x1b[0m\x1b[38;2;13;0;51m█\x1b[0m\x1b[38;2;19;0;51m█\x1b[0m\x1b[38;2;25;0;51m█\x1b[0m\x1b[38;2;31;0;51m█\x1b[0m\x1b[38;2;37;0;51m█\x1b[0m\x1b[38;2;43;0;51m█\x1b[0m\x1b[38;2;49;0;51m█\x1b[0m\x1b[38;2;51;0;47m█\x1b[0m\x1b[38;2;51;0;41m█\x1b[0m\x1b[38;2;51;0;35m█\x1b[0m\x1b[38;2;51;0;29m█\x1b[0m\x1b[38;2;51;0;23m█\x1b[0m\x1b[38;2;51;0;17m█\x1b[0m\x1b[38;2;51;0;11m█\x1b[0m\x1b[38;2;51;0;5m█\x1b[0m \n ✓ \x1b[1;34m8-bit color\x1b[0m \x1b[38;2;122;0;0m█\x1b[0m\x1b[38;2;122;14;0m█\x1b[0m\x1b[38;2;122;28;0m█\x1b[0m\x1b[38;2;122;42;0m█\x1b[0m\x1b[38;2;122;56;0m█\x1b[0m\x1b[38;2;122;70;0m█\x1b[0m\x1b[38;2;122;84;0m█\x1b[0m\x1b[38;2;122;98;0m█\x1b[0m\x1b[38;2;122;112;0m█\x1b[0m\x1b[38;2;117;122;0m█\x1b[0m\x1b[38;2;103;122;0m█\x1b[0m\x1b[38;2;89;122;0m█\x1b[0m\x1b[38;2;75;122;0m█\x1b[0m\x1b[38;2;61;122;0m█\x1b[0m\x1b[38;2;47;122;0m█\x1b[0m\x1b[38;2;32;122;0m█\x1b[0m\x1b[38;2;18;122;0m█\x1b[0m\x1b[38;2;4;122;0m█\x1b[0m\x1b[38;2;0;122;9m█\x1b[0m\x1b[38;2;0;122;23m█\x1b[0m\x1b[38;2;0;122;37m█\x1b[0m\x1b[38;2;0;122;51m█\x1b[0m\x1b[38;2;0;122;65m█\x1b[0m\x1b[38;2;0;122;80m█\x1b[0m\x1b[38;2;0;122;94m█\x1b[0m\x1b[38;2;0;122;108m█\x1b[0m\x1b[38;2;0;122;122m█\x1b[0m\x1b[38;2;0;108;122m█\x1b[0m\x1b[38;2;0;94;122m█\x1b[0m\x1b[38;2;0;80;122m█\x1b[0m\x1b[38;2;0;65;122m█\x1b[0m\x1b[38;2;0;51;122m█\x1b[0m\x1b[38;2;0;37;122m█\x1b[0m\x1b[38;2;0;23;122m█\x1b[0m\x1b[38;2;0;9;122m█\x1b[0m\x1b[38;2;4;0;122m█\x1b[0m\x1b[38;2;18;0;122m█\x1b[0m\x1b[38;2;32;0;122m█\x1b[0m\x1b[38;2;47;0;122m█\x1b[0m\x1b[38;2;61;0;122m█\x1b[0m\x1b[38;2;75;0;122m█\x1b[0m\x1b[38;2;89;0;122m█\x1b[0m\x1b[38;2;103;0;122m█\x1b[0m\x1b[38;2;117;0;122m█\x1b[0m\x1b[38;2;122;0;112m█\x1b[0m\x1b[38;2;122;0;98m█\x1b[0m\x1b[38;2;122;0;84m█\x1b[0m\x1b[38;2;122;0;70m█\x1b[0m\x1b[38;2;122;0;56m█\x1b[0m\x1b[38;2;122;0;42m█\x1b[0m\x1b[38;2;122;0;28m█\x1b[0m\x1b[38;2;122;0;14m█\x1b[0m \n ✓ \x1b[1;35mTruecolor (16.7 million)\x1b[0m 
\x1b[38;2;193;0;0m█\x1b[0m\x1b[38;2;193;22;0m█\x1b[0m\x1b[38;2;193;44;0m█\x1b[0m\x1b[38;2;193;67;0m█\x1b[0m\x1b[38;2;193;89;0m█\x1b[0m\x1b[38;2;193;111;0m█\x1b[0m\x1b[38;2;193;134;0m█\x1b[0m\x1b[38;2;193;156;0m█\x1b[0m\x1b[38;2;193;178;0m█\x1b[0m\x1b[38;2;186;193;0m█\x1b[0m\x1b[38;2;163;193;0m█\x1b[0m\x1b[38;2;141;193;0m█\x1b[0m\x1b[38;2;119;193;0m█\x1b[0m\x1b[38;2;96;193;0m█\x1b[0m\x1b[38;2;74;193;0m█\x1b[0m\x1b[38;2;52;193;0m█\x1b[0m\x1b[38;2;29;193;0m█\x1b[0m\x1b[38;2;7;193;0m█\x1b[0m\x1b[38;2;0;193;14m█\x1b[0m\x1b[38;2;0;193;37m█\x1b[0m\x1b[38;2;0;193;59m█\x1b[0m\x1b[38;2;0;193;81m█\x1b[0m\x1b[38;2;0;193;104m█\x1b[0m\x1b[38;2;0;193;126m█\x1b[0m\x1b[38;2;0;193;149m█\x1b[0m\x1b[38;2;0;193;171m█\x1b[0m\x1b[38;2;0;193;193m█\x1b[0m\x1b[38;2;0;171;193m█\x1b[0m\x1b[38;2;0;149;193m█\x1b[0m\x1b[38;2;0;126;193m█\x1b[0m\x1b[38;2;0;104;193m█\x1b[0m\x1b[38;2;0;81;193m█\x1b[0m\x1b[38;2;0;59;193m█\x1b[0m\x1b[38;2;0;37;193m█\x1b[0m\x1b[38;2;0;14;193m█\x1b[0m\x1b[38;2;7;0;193m█\x1b[0m\x1b[38;2;29;0;193m█\x1b[0m\x1b[38;2;52;0;193m█\x1b[0m\x1b[38;2;74;0;193m█\x1b[0m\x1b[38;2;96;0;193m█\x1b[0m\x1b[38;2;119;0;193m█\x1b[0m\x1b[38;2;141;0;193m█\x1b[0m\x1b[38;2;163;0;193m█\x1b[0m\x1b[38;2;186;0;193m█\x1b[0m\x1b[38;2;193;0;178m█\x1b[0m\x1b[38;2;193;0;156m█\x1b[0m\x1b[38;2;193;0;134m█\x1b[0m\x1b[38;2;193;0;111m█\x1b[0m\x1b[38;2;193;0;89m█\x1b[0m\x1b[38;2;193;0;67m█\x1b[0m\x1b[38;2;193;0;44m█\x1b[0m\x1b[38;2;193;0;22m█\x1b[0m \n ✓ \x1b[1;33mDumb terminals\x1b[0m \x1b[38;2;255;10;10m█\x1b[0m\x1b[38;2;255;38;10m█\x1b[0m\x1b[38;2;255;66;10m█\x1b[0m\x1b[38;2;255;94;10m█\x1b[0m\x1b[38;2;255;123;10m█\x1b[0m\x1b[38;2;255;151;10m█\x1b[0m\x1b[38;2;255;179;10m█\x1b[0m\x1b[38;2;255;207;10m█\x1b[0m\x1b[38;2;255;236;10m█\x1b[0m\x1b[38;2;245;255;10m█\x1b[0m\x1b[38;2;217;255;10m█\x1b[0m\x1b[38;2;189;255;10m█\x1b[0m\x1b[38;2;160;255;10m█\x1b[0m\x1b[38;2;132;255;10m█\x1b[0m\x1b[38;2;104;255;10m█\x1b[0m\x1b[38;2;76;255;10m█\x1b[0m\x1b[38;2;47;255;10m█\x1b[0m\x1b[38;2;19;255;10m█\x1b[0m\x1b[38;2;10;255;29m█\x1b[0m\x1b[38;2;10;255;57m█\x1b[0m\x1b[38;2;10;255;85m█\x1b[0m\x1b[38;2;10;255;113m█\x1b[0m\x1b[38;2;10;255;142m█\x1b[0m\x1b[38;2;10;255;170m█\x1b[0m\x1b[38;2;10;255;198m█\x1b[0m\x1b[38;2;10;255;226m█\x1b[0m\x1b[38;2;10;254;255m█\x1b[0m\x1b[38;2;10;226;255m█\x1b[0m\x1b[38;2;10;198;255m█\x1b[0m\x1b[38;2;10;170;255m█\x1b[0m\x1b[38;2;10;142;255m█\x1b[0m\x1b[38;2;10;113;255m█\x1b[0m\x1b[38;2;10;85;255m█\x1b[0m\x1b[38;2;10;57;255m█\x1b[0m\x1b[38;2;10;29;255m█\x1b[0m\x1b[38;2;19;10;255m█\x1b[0m\x1b[38;2;47;10;255m█\x1b[0m\x1b[38;2;76;10;255m█\x1b[0m\x1b[38;2;104;10;255m█\x1b[0m\x1b[38;2;132;10;255m█\x1b[0m\x1b[38;2;160;10;255m█\x1b[0m\x1b[38;2;189;10;255m█\x1b[0m\x1b[38;2;217;10;255m█\x1b[0m\x1b[38;2;245;10;255m█\x1b[0m\x1b[38;2;255;10;236m█\x1b[0m\x1b[38;2;255;10;207m█\x1b[0m\x1b[38;2;255;10;179m█\x1b[0m\x1b[38;2;255;10;151m█\x1b[0m\x1b[38;2;255;10;123m█\x1b[0m\x1b[38;2;255;10;94m█\x1b[0m\x1b[38;2;255;10;66m█\x1b[0m\x1b[38;2;255;10;38m█\x1b[0m \n ✓ \x1b[1;36mAutomatic color conversion\x1b[0m 
\x1b[38;2;255;81;81m█\x1b[0m\x1b[38;2;255;101;81m█\x1b[0m\x1b[38;2;255;121;81m█\x1b[0m\x1b[38;2;255;141;81m█\x1b[0m\x1b[38;2;255;161;81m█\x1b[0m\x1b[38;2;255;181;81m█\x1b[0m\x1b[38;2;255;201;81m█\x1b[0m\x1b[38;2;255;221;81m█\x1b[0m\x1b[38;2;255;241;81m█\x1b[0m\x1b[38;2;248;255;81m█\x1b[0m\x1b[38;2;228;255;81m█\x1b[0m\x1b[38;2;208;255;81m█\x1b[0m\x1b[38;2;188;255;81m█\x1b[0m\x1b[38;2;168;255;81m█\x1b[0m\x1b[38;2;148;255;81m█\x1b[0m\x1b[38;2;128;255;81m█\x1b[0m\x1b[38;2;108;255;81m█\x1b[0m\x1b[38;2;88;255;81m█\x1b[0m\x1b[38;2;81;255;94m█\x1b[0m\x1b[38;2;81;255;114m█\x1b[0m\x1b[38;2;81;255;134m█\x1b[0m\x1b[38;2;81;255;154m█\x1b[0m\x1b[38;2;81;255;174m█\x1b[0m\x1b[38;2;81;255;194m█\x1b[0m\x1b[38;2;81;255;214m█\x1b[0m\x1b[38;2;81;255;234m█\x1b[0m\x1b[38;2;81;254;255m█\x1b[0m\x1b[38;2;81;234;255m█\x1b[0m\x1b[38;2;81;214;255m█\x1b[0m\x1b[38;2;81;194;255m█\x1b[0m\x1b[38;2;81;174;255m█\x1b[0m\x1b[38;2;81;154;255m█\x1b[0m\x1b[38;2;81;134;255m█\x1b[0m\x1b[38;2;81;114;255m█\x1b[0m\x1b[38;2;81;94;255m█\x1b[0m\x1b[38;2;88;81;255m█\x1b[0m\x1b[38;2;108;81;255m█\x1b[0m\x1b[38;2;128;81;255m█\x1b[0m\x1b[38;2;148;81;255m█\x1b[0m\x1b[38;2;168;81;255m█\x1b[0m\x1b[38;2;188;81;255m█\x1b[0m\x1b[38;2;208;81;255m█\x1b[0m\x1b[38;2;228;81;255m█\x1b[0m\x1b[38;2;248;81;255m█\x1b[0m\x1b[38;2;255;81;241m█\x1b[0m\x1b[38;2;255;81;221m█\x1b[0m\x1b[38;2;255;81;201m█\x1b[0m\x1b[38;2;255;81;181m█\x1b[0m\x1b[38;2;255;81;161m█\x1b[0m\x1b[38;2;255;81;141m█\x1b[0m\x1b[38;2;255;81;121m█\x1b[0m\x1b[38;2;255;81;101m█\x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Styles \x1b[0m\x1b[1;31m \x1b[0mAll ansi styles: \x1b[1mbold\x1b[0m, \x1b[2mdim\x1b[0m, \x1b[3mitalic\x1b[0m, \x1b[4munderline\x1b[0m, \x1b[9mstrikethrough\x1b[0m, \x1b[7mreverse\x1b[0m, and even \n \x1b[5mblink\x1b[0m. \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Text \x1b[0m\x1b[1;31m \x1b[0mWord wrap text. Justify \x1b[32mleft\x1b[0m, \x1b[33mcenter\x1b[0m, \x1b[34mright\x1b[0m or \x1b[31mfull\x1b[0m. \n \n \x1b[32mLorem ipsum dolor \x1b[0m \x1b[33m Lorem ipsum dolor \x1b[0m \x1b[34m Lorem ipsum dolor\x1b[0m \x1b[31mLorem\x1b[0m\x1b[31m \x1b[0m\x1b[31mipsum\x1b[0m\x1b[31m \x1b[0m\x1b[31mdolor\x1b[0m\x1b[31m \x1b[0m\x1b[31msit\x1b[0m \n \x1b[32msit amet, \x1b[0m \x1b[33m sit amet, \x1b[0m \x1b[34m sit amet,\x1b[0m \x1b[31mamet,\x1b[0m\x1b[31m \x1b[0m\x1b[31mconsectetur\x1b[0m \n \x1b[32mconsectetur \x1b[0m \x1b[33m consectetur \x1b[0m \x1b[34m consectetur\x1b[0m \x1b[31madipiscing\x1b[0m\x1b[31m \x1b[0m\x1b[31melit.\x1b[0m \n \x1b[32madipiscing elit. \x1b[0m \x1b[33m adipiscing elit. \x1b[0m \x1b[34m adipiscing elit.\x1b[0m \x1b[31mQuisque\x1b[0m\x1b[31m \x1b[0m\x1b[31min\x1b[0m\x1b[31m \x1b[0m\x1b[31mmetus\x1b[0m\x1b[31m \x1b[0m\x1b[31msed\x1b[0m \n \x1b[32mQuisque in metus \x1b[0m \x1b[33mQuisque in metus sed\x1b[0m \x1b[34m Quisque in metus\x1b[0m \x1b[31msapien\x1b[0m\x1b[31m \x1b[0m\x1b[31multricies\x1b[0m \n \x1b[32msed sapien \x1b[0m \x1b[33m sapien ultricies \x1b[0m \x1b[34m sed sapien\x1b[0m \x1b[31mpretium\x1b[0m\x1b[31m \x1b[0m\x1b[31ma\x1b[0m\x1b[31m \x1b[0m\x1b[31mat\x1b[0m\x1b[31m \x1b[0m\x1b[31mjusto.\x1b[0m \n \x1b[32multricies pretium a\x1b[0m \x1b[33mpretium a at justo. \x1b[0m \x1b[34multricies pretium a\x1b[0m \x1b[31mMaecenas\x1b[0m\x1b[31m \x1b[0m\x1b[31mluctus\x1b[0m\x1b[31m \x1b[0m\x1b[31mvelit\x1b[0m \n \x1b[32mat justo. Maecenas \x1b[0m \x1b[33m Maecenas luctus \x1b[0m \x1b[34m at justo. 
Maecenas\x1b[0m \x1b[31met auctor maximus.\x1b[0m \n \x1b[32mluctus velit et \x1b[0m \x1b[33m velit et auctor \x1b[0m \x1b[34m luctus velit et\x1b[0m \n \x1b[32mauctor maximus. \x1b[0m \x1b[33m maximus. \x1b[0m \x1b[34m auctor maximus.\x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31mAsian languages\x1b[0m\x1b[1;31m \x1b[0m🇨🇳 该库支持中文,日文和韩文文本! \n 🇯🇵 ライブラリは中国語、日本語、韓国語のテキストをサポートしています \n 🇰🇷 이 라이브러리는 중국어, 일본어 및 한국어 텍스트를 지원합니다 \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31mConsole markup \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;35mRich\x1b[0m supports a simple \x1b[3mbbcode\x1b[0m like \x1b[1mmarkup\x1b[0m for \x1b[33mcolor\x1b[0m, \x1b[4mstyle\x1b[0m, and emoji! 👍 🍎 🐜 … \n 🥖 🚌 \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Tables \x1b[0m\x1b[1;31m \x1b[0m\x1b[1m \x1b[0m\x1b[1;32mDate\x1b[0m\x1b[1m \x1b[0m\x1b[1m \x1b[0m \x1b[1m \x1b[0m\x1b[1;34mTitle\x1b[0m\x1b[1m \x1b[0m\x1b[1m \x1b[0m \x1b[1m \x1b[0m\x1b[1;36mProduction Budget\x1b[0m\x1b[1m \x1b[0m \x1b[1m \x1b[0m\x1b[1m \x1b[0m\x1b[1;35mBox Office\x1b[0m\x1b[1m \x1b[0m \n ────────────────────────────────────────────────────────────────────────────────── \n \x1b[32m \x1b[0m\x1b[32mDec 20, 2019\x1b[0m\x1b[32m \x1b[0m \x1b[34m \x1b[0m\x1b[34mStar Wars: The Rise of \x1b[0m\x1b[34m \x1b[0m \x1b[36m \x1b[0m\x1b[36m $275,000,000\x1b[0m\x1b[36m \x1b[0m \x1b[35m \x1b[0m\x1b[35m $375,126,118\x1b[0m\x1b[35m \x1b[0m \n \x1b[34m \x1b[0m\x1b[34mSkywalker \x1b[0m\x1b[34m \x1b[0m \n \x1b[2;32m \x1b[0m\x1b[2;32mMay 25, 2018\x1b[0m\x1b[2;32m \x1b[0m \x1b[2;34m \x1b[0m\x1b[1;2;34mSolo\x1b[0m\x1b[2;34m: A Star Wars Story \x1b[0m\x1b[2;34m \x1b[0m \x1b[2;36m \x1b[0m\x1b[2;36m $275,000,000\x1b[0m\x1b[2;36m \x1b[0m \x1b[2;35m \x1b[0m\x1b[2;35m $393,151,347\x1b[0m\x1b[2;35m \x1b[0m \n \x1b[32m \x1b[0m\x1b[32mDec 15, 2017\x1b[0m\x1b[32m \x1b[0m \x1b[34m \x1b[0m\x1b[34mStar Wars Ep. VIII: The Last\x1b[0m\x1b[34m \x1b[0m \x1b[36m \x1b[0m\x1b[36m $262,000,000\x1b[0m\x1b[36m \x1b[0m \x1b[35m \x1b[0m\x1b[1;35m$1,332,539,889\x1b[0m\x1b[35m \x1b[0m \n \x1b[34m \x1b[0m\x1b[34mJedi \x1b[0m\x1b[34m \x1b[0m \n \x1b[2;32m \x1b[0m\x1b[2;32mMay 19, 1999\x1b[0m\x1b[2;32m \x1b[0m \x1b[2;34m \x1b[0m\x1b[2;34mStar Wars Ep. 
\x1b[0m\x1b[1;2;34mI\x1b[0m\x1b[2;34m: \x1b[0m\x1b[2;3;34mThe phantom\x1b[0m\x1b[2;34m \x1b[0m \x1b[2;36m \x1b[0m\x1b[2;36m $115,000,000\x1b[0m\x1b[2;36m \x1b[0m \x1b[2;35m \x1b[0m\x1b[2;35m$1,027,044,677\x1b[0m\x1b[2;35m \x1b[0m \n \x1b[2;34m \x1b[0m\x1b[2;3;34mMenace\x1b[0m\x1b[2;34m \x1b[0m\x1b[2;34m \x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Syntax \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 1 \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mdef\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;166;226;46;48;2;39;40;34miter_last\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mIterable\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m[\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mT\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m]\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m \x1b[1m{\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m highlighting \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 2 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;230;219;116;48;2;39;40;34m\"\"\"Iterate and generate a tuple\x1b[0m \x1b[2;32m│ \x1b[0m\x1b[32m'foo'\x1b[0m: \x1b[1m[\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m & \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 3 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalues\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m)\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ \x1b[0m\x1b[1;34m3.1427\x1b[0m, \n\x1b[1;31m \x1b[0m\x1b[1;31m pretty \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 4 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mtry\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ \x1b[0m\x1b[1m(\x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m printing \x1b[0m\x1b[1;31m \x1b[0m\x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 5 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mnext\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m(\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_\x1b[0m \x1b[2;32m│ │ │ \x1b[0m\x1b[32m'Paul Atriedies'\x1b[0m, \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 6 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mexcept\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;166;226;46;48;2;39;40;34mStopIteration\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ │ \x1b[0m\x1b[32m'Vladimir Harkonnen'\x1b[0m, \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 7 
\x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mreturn\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ │ \x1b[0m\x1b[32m'Thufir Haway'\x1b[0m \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 8 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mfor\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalue\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34min\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34miter_values\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m:\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ │ \x1b[0m\x1b[1m)\x1b[0m \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m 9 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34myield\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mFalse\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m \x1b[2;32m│ \x1b[0m\x1b[1m]\x1b[0m, \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m10 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ │ \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;249;38;114;48;2;39;40;34m=\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mvalue\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[2;32m│ \x1b[0m\x1b[32m'atomic'\x1b[0m: \x1b[1m(\x1b[0m\x1b[3;91mFalse\x1b[0m, \x1b[3;92mTrue\x1b[0m, \x1b[3;35mNone\x1b[0m\x1b[1m)\x1b[0m \n \x1b[1;38;2;227;227;221;48;2;39;40;34m \x1b[0m\x1b[38;2;101;102;96;48;2;39;40;34m11 \x1b[0m\x1b[2;38;2;117;113;94;48;2;39;40;34m│ \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34myield\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;102;217;239;48;2;39;40;34mTrue\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m,\x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34m \x1b[0m\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\x1b[0m\x1b[48;2;39;40;34m \x1b[0m \x1b[1m}\x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m Markdown \x1b[0m\x1b[1;31m \x1b[0m\x1b[36m# Markdown\x1b[0m ╔══════════════════════════════════════╗ \n ║ \x1b[1mMarkdown\x1b[0m ║ \n \x1b[36mSupports much of the *markdown*, \x1b[0m ╚══════════════════════════════════════╝ \n \x1b[36m__syntax__!\x1b[0m \n Supports much of the \x1b[3mmarkdown\x1b[0m, \x1b[1msyntax\x1b[0m! \n \x1b[36m- Headers\x1b[0m \n \x1b[36m- Basic formatting: **bold**, *italic*, \x1b[0m \x1b[1;33m • \x1b[0mHeaders \n \x1b[36m`code`\x1b[0m \x1b[1;33m • \x1b[0mBasic formatting: \x1b[1mbold\x1b[0m, \x1b[3mitalic\x1b[0m, \x1b[97;40mcode\x1b[0m \n \x1b[36m- Block quotes\x1b[0m \x1b[1;33m • \x1b[0mBlock quotes \n \x1b[36m- Lists, and more...\x1b[0m \x1b[1;33m • \x1b[0mLists, and more... \n \x1b[36m \x1b[0m \n\x1b[1;31m \x1b[0m \n\x1b[1;31m \x1b[0m\x1b[1;31m And more \x1b[0m\x1b[1;31m \x1b[0mProgress bars, columns, styled logging handler, tracebacks, etc... \n\x1b[1;31m \x1b[0m \n"
|
from .auto_argparse import make_parser, parse_args_and_run
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The MLIR Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script for updating SPIR-V dialect by scraping information from SPIR-V
# HTML and JSON specs from the Internet.
#
# For example, to define the enum attribute for SPIR-V memory model:
#
# ./gen_spirv_dialect.py --base-td-path /path/to/SPIRVBase.td \
# --new-enum MemoryModel
#
# The 'operand_kinds' dict of spirv.core.grammar.json contains all supported
# SPIR-V enum classes.
import re
import requests
import textwrap
SPIRV_HTML_SPEC_URL = 'https://www.khronos.org/registry/spir-v/specs/unified1/SPIRV.html'
SPIRV_JSON_SPEC_URL = 'https://raw.githubusercontent.com/KhronosGroup/SPIRV-Headers/master/include/spirv/unified1/spirv.core.grammar.json'
AUTOGEN_OP_DEF_SEPARATOR = '\n// -----\n\n'
AUTOGEN_ENUM_SECTION_MARKER = 'enum section. Generated from SPIR-V spec; DO NOT MODIFY!'
AUTOGEN_OPCODE_SECTION_MARKER = (
'opcode section. Generated from SPIR-V spec; DO NOT MODIFY!')
def get_spirv_doc_from_html_spec():
"""Extracts instruction documentation from SPIR-V HTML spec.
Returns:
- A dict mapping from instruction opcode to documentation.
"""
response = requests.get(SPIRV_HTML_SPEC_URL)
spec = response.content
from bs4 import BeautifulSoup
spirv = BeautifulSoup(spec, 'html.parser')
section_anchor = spirv.find('h3', {'id': '_a_id_instructions_a_instructions'})
doc = {}
for section in section_anchor.parent.find_all('div', {'class': 'sect3'}):
for table in section.find_all('table'):
inst_html = table.tbody.tr.td.p
opname = inst_html.a['id']
# Ignore the first line, which is just the opname.
doc[opname] = inst_html.text.split('\n', 1)[1].strip()
return doc
def get_spirv_grammar_from_json_spec():
"""Extracts operand kind and instruction grammar from SPIR-V JSON spec.
Returns:
- A list containing all operand kinds' grammar
- A list containing all instructions' grammar
"""
response = requests.get(SPIRV_JSON_SPEC_URL)
spec = response.content
import json
spirv = json.loads(spec)
return spirv['operand_kinds'], spirv['instructions']
def split_list_into_sublists(items, offset):
"""Split the list of items into multiple sublists.
This is to make sure the string composed from each sublist won't exceed
80 characters.
Arguments:
- items: a list of strings
- offset: the offset in calculating each sublist's length
"""
chunks = []
chunk = []
chunk_len = 0
for item in items:
chunk_len += len(item) + 2
if chunk_len > 80:
chunks.append(chunk)
chunk = []
chunk_len = len(item) + 2
chunk.append(item)
if len(chunk) != 0:
chunks.append(chunk)
return chunks
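# Illustrative example (comment only): with the 80-character budget above,
#     split_list_into_sublists(['SPV_OC_OpNop', 'SPV_OC_OpUndef', 'SPV_OC_OpName'], 6)
# returns [['SPV_OC_OpNop', 'SPV_OC_OpUndef', 'SPV_OC_OpName']] because the joined
# names still fit on one line; longer lists are broken into several sublists.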
def uniquify(lst, equality_fn):
"""Returns a list after pruning duplicate elements.
Arguments:
- lst: List whose elements are to be uniqued.
- equality_fn: Function used to compare equality between elements of the
list.
Returns:
- A list with all duplicates removed. The order of elements is the same as in
the original list, with only the first occurrence of each duplicate retained.
"""
keys = set()
unique_lst = []
for elem in lst:
key = equality_fn(elem)
if key not in keys:
unique_lst.append(elem)
keys.add(key)
return unique_lst
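# Illustrative example (comment only):
#     uniquify([(0, 'a'), (1, 'a'), (2, 'b')], lambda pair: pair[1])
# returns [(0, 'a'), (2, 'b')]: the second pair with key 'a' is dropped while the
# original ordering is preserved.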
def gen_operand_kind_enum_attr(operand_kind):
"""Generates the TableGen I32EnumAttr definition for the given operand kind.
Returns:
- The operand kind's name
- A string containing the TableGen I32EnumAttr definition
"""
if 'enumerants' not in operand_kind:
return '', ''
kind_name = operand_kind['kind']
kind_acronym = ''.join([c for c in kind_name if c >= 'A' and c <= 'Z'])
kind_cases = [(case['enumerant'], case['value'])
for case in operand_kind['enumerants']]
kind_cases = uniquify(kind_cases, lambda x: x[1])
max_len = max([len(symbol) for (symbol, _) in kind_cases])
# Generate the definition for each enum case
fmt_str = 'def SPV_{acronym}_{symbol} {colon:>{offset}} '\
'I32EnumAttrCase<"{symbol}", {value}>;'
case_defs = [
fmt_str.format(
acronym=kind_acronym,
symbol=case[0],
value=case[1],
colon=':',
offset=(max_len + 1 - len(case[0]))) for case in kind_cases
]
case_defs = '\n'.join(case_defs)
# Generate the list of enum case names
fmt_str = 'SPV_{acronym}_{symbol}'
case_names = [fmt_str.format(acronym=kind_acronym,symbol=case[0])
for case in kind_cases]
# Split them into sublists and concatenate into multiple lines
case_names = split_list_into_sublists(case_names, 6)
case_names = ['{:6}'.format('') + ', '.join(sublist)
for sublist in case_names]
case_names = ',\n'.join(case_names)
# Generate the enum attribute definition
enum_attr = 'def SPV_{name}Attr :\n '\
'I32EnumAttr<"{name}", "valid SPIR-V {name}", [\n{cases}\n ]> {{\n'\
' let returnType = "::mlir::spirv::{name}";\n'\
' let convertFromStorage = '\
'"static_cast<::mlir::spirv::{name}>($_self.getInt())";\n'\
' let cppNamespace = "::mlir::spirv";\n}}'.format(
name=kind_name, cases=case_names)
return kind_name, case_defs + '\n\n' + enum_attr
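# Illustrative example (comment only): for an operand kind named 'MemoryModel'
# with an enumerant ('Simple', 0), the generated TableGen has roughly the shape
#     def SPV_MM_Simple : I32EnumAttrCase<"Simple", 0>;
#     def SPV_MemoryModelAttr :
#         I32EnumAttr<"MemoryModel", "valid SPIR-V MemoryModel", [...]> {...}
# where the acronym 'MM' comes from the upper-case letters of the kind name.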
def gen_opcode(instructions):
""" Generates the TableGen definition to map opname to opcode
Returns:
- A string containing the TableGen SPV_OpCode definition
"""
max_len = max([len(inst['opname']) for inst in instructions])
def_fmt_str = 'def SPV_OC_{name} {colon:>{offset}} '\
'I32EnumAttrCase<"{name}", {value}>;'
opcode_defs = [
def_fmt_str.format(
name=inst['opname'],
value=inst['opcode'],
colon=':',
offset=(max_len + 1 - len(inst['opname']))) for inst in instructions
]
opcode_str = '\n'.join(opcode_defs)
decl_fmt_str = 'SPV_OC_{name}'
opcode_list = [
decl_fmt_str.format(name=inst['opname']) for inst in instructions
]
opcode_list = split_list_into_sublists(opcode_list, 6)
opcode_list = [
'{:6}'.format('') + ', '.join(sublist) for sublist in opcode_list
]
opcode_list = ',\n'.join(opcode_list)
enum_attr = 'def SPV_OpcodeAttr :\n'\
' I32EnumAttr<"{name}", "valid SPIR-V instructions", [\n'\
'{lst}\n'\
' ]> {{\n'\
' let returnType = "::mlir::spirv::{name}";\n'\
' let convertFromStorage = '\
'"static_cast<::mlir::spirv::{name}>($_self.getInt())";\n'\
' let cppNamespace = "::mlir::spirv";\n}}'.format(
name='Opcode', lst=opcode_list)
return opcode_str + '\n\n' + enum_attr
def update_td_opcodes(path, instructions, filter_list):
"""Updates SPIRVBase.td with newly generated opcode cases.
Arguments:
- path: the path to SPIRVBase.td
- instructions: a list containing all SPIR-V instructions' grammar
- filter_list: a list containing new opnames to add
"""
with open(path, 'r') as f:
content = f.read()
content = content.split(AUTOGEN_OPCODE_SECTION_MARKER)
assert len(content) == 3
# Extend opcode list with existing list
existing_opcodes = [k[11:] for k in re.findall(r'def SPV_OC_\w+', content[1])]
filter_list.extend(existing_opcodes)
filter_list = list(set(filter_list))
# Generate the opcode for all instructions in SPIR-V
filter_instrs = list(
filter(lambda inst: (inst['opname'] in filter_list), instructions))
# Sort instruction based on opcode
filter_instrs.sort(key=lambda inst: inst['opcode'])
opcode = gen_opcode(filter_instrs)
# Substitute the opcode
content = content[0] + AUTOGEN_OPCODE_SECTION_MARKER + '\n\n' + \
opcode + '\n\n// End ' + AUTOGEN_OPCODE_SECTION_MARKER \
+ content[2]
with open(path, 'w') as f:
f.write(content)
def update_td_enum_attrs(path, operand_kinds, filter_list):
"""Updates SPIRVBase.td with newly generated enum definitions.
Arguments:
- path: the path to SPIRVBase.td
- operand_kinds: a list containing all operand kinds' grammar
- filter_list: a list containing new enums to add
"""
with open(path, 'r') as f:
content = f.read()
content = content.split(AUTOGEN_ENUM_SECTION_MARKER)
assert len(content) == 3
# Extend filter list with existing enum definitions
existing_kinds = [
k[8:-4] for k in re.findall(r'def SPV_\w+Attr', content[1])]
filter_list.extend(existing_kinds)
# Generate definitions for all enums in filter list
defs = [gen_operand_kind_enum_attr(kind)
for kind in operand_kinds if kind['kind'] in filter_list]
# Sort alphabetically according to enum name
defs.sort(key=lambda enum : enum[0])
# Only keep the definitions from now on
defs = [enum[1] for enum in defs]
# Substitute the old section
content = content[0] + AUTOGEN_ENUM_SECTION_MARKER + '\n\n' + \
'\n\n'.join(defs) + "\n\n// End " + AUTOGEN_ENUM_SECTION_MARKER \
+ content[2]
with open(path, 'w') as f:
f.write(content)
def snake_casify(name):
"""Turns the given name to follow snake_case convention."""
name = re.sub(r'\W+', '', name).split()
name = [s.lower() for s in name]
return '_'.join(name)
def map_spec_operand_to_ods_argument(operand):
"""Maps an operand in the SPIR-V JSON spec to an op argument in ODS.
Arguments:
- A dict containing the operand's kind, quantifier, and name
Returns:
- A string containing both the type and name for the argument
"""
kind = operand['kind']
quantifier = operand.get('quantifier', '')
# These instruction "operands" are for encoding the results; they should
# not be handled here.
assert kind != 'IdResultType', 'unexpected to handle "IdResultType" kind'
assert kind != 'IdResult', 'unexpected to handle "IdResult" kind'
if kind == 'IdRef':
if quantifier == '':
arg_type = 'SPV_Type'
elif quantifier == '?':
arg_type = 'SPV_Optional<SPV_Type>'
else:
arg_type = 'Variadic<SPV_Type>'
elif kind == 'IdMemorySemantics' or kind == 'IdScope':
# TODO(antiagainst): Need to further constrain 'IdMemorySemantics'
# and 'IdScope' given that they should be generated from OpConstant.
assert quantifier == '', ('unexpected to have optional/variadic memory '
'semantics or scope <id>')
arg_type = 'I32'
elif kind == 'LiteralInteger':
if quantifier == '':
arg_type = 'I32Attr'
elif quantifier == '?':
arg_type = 'OptionalAttr<I32Attr>'
else:
arg_type = 'OptionalAttr<I32ArrayAttr>'
elif kind == 'LiteralString' or \
kind == 'LiteralContextDependentNumber' or \
kind == 'LiteralExtInstInteger' or \
kind == 'LiteralSpecConstantOpInteger' or \
kind == 'PairLiteralIntegerIdRef' or \
kind == 'PairIdRefLiteralInteger' or \
kind == 'PairIdRefIdRef':
assert False, '"{}" kind unimplemented'.format(kind)
else:
# The rest are all enum operands that we represent with op attributes.
assert quantifier != '*', 'unexpected to have variadic enum attribute'
arg_type = 'SPV_{}Attr'.format(kind)
if quantifier == '?':
arg_type = 'OptionalAttr<{}>'.format(arg_type)
name = operand.get('name', '')
name = snake_casify(name) if name else kind.lower()
return '{}:${}'.format(arg_type, name)
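# Illustrative example (comment only): an operand dict such as
#     {'kind': 'IdRef', 'quantifier': '?'}
# maps to 'SPV_Optional<SPV_Type>:$idref' -- the '?' quantifier wraps the type and,
# with no explicit name, the lower-cased kind is used as the argument name.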
def get_op_definition(instruction, doc, existing_info):
"""Generates the TableGen op definition for the given SPIR-V instruction.
Arguments:
- instruction: the instruction's SPIR-V JSON grammar
- doc: the instruction's SPIR-V HTML doc
- existing_info: a dict containing potential manually specified sections for
this instruction
Returns:
- A string containing the TableGen op definition
"""
fmt_str = 'def SPV_{opname}Op : SPV_Op<"{opname}", [{traits}]> {{\n'\
' let summary = {summary};\n\n'\
' let description = [{{\n'\
'{description}\n\n'\
' ### Custom assembly form\n'\
'{assembly}'\
'}}];\n\n'\
' let arguments = (ins{args});\n\n'\
' let results = (outs{results});\n'\
'{extras}'\
'}}\n'
opname = instruction['opname'][2:]
summary, description = doc.split('\n', 1)
wrapper = textwrap.TextWrapper(
width=76, initial_indent=' ', subsequent_indent=' ')
# Format summary. If the summary can fit in the same line, we print it out
# as a "-quoted string; otherwise, wrap the lines using "[{...}]".
summary = summary.strip()
if len(summary) + len(' let summary = "";') <= 80:
summary = '"{}"'.format(summary)
else:
summary = '[{{\n{}\n }}]'.format(wrapper.fill(summary))
# Wrap description
description = description.split('\n')
description = [wrapper.fill(line) for line in description if line]
description = '\n\n'.join(description)
operands = instruction.get('operands', [])
# Set op's result
results = ''
if len(operands) > 0 and operands[0]['kind'] == 'IdResultType':
results = '\n SPV_Type:$result\n '
operands = operands[1:]
if 'results' in existing_info:
results = existing_info['results']
# Ignore the operand standing for the result <id>
if len(operands) > 0 and operands[0]['kind'] == 'IdResult':
operands = operands[1:]
# Set op' argument
arguments = existing_info.get('arguments', None)
if arguments is None:
arguments = [map_spec_operand_to_ods_argument(o) for o in operands]
arguments = '\n '.join(arguments)
if arguments:
# Prepend and append whitespace for formatting
arguments = '\n {}\n '.format(arguments)
assembly = existing_info.get('assembly', None)
if assembly is None:
assembly = ' ``` {.ebnf}\n'\
' [TODO]\n'\
' ```\n\n'\
' For example:\n\n'\
' ```\n'\
' [TODO]\n'\
' ```\n '
return fmt_str.format(
opname=opname,
traits=existing_info.get('traits', ''),
summary=summary,
description=description,
assembly=assembly,
args=arguments,
results=results,
extras=existing_info.get('extras', ''))
def extract_td_op_info(op_def):
"""Extracts potentially manually specified sections in op's definition.
Arguments:
- op_def: a string containing the op's TableGen definition
Returns:
- A dict containing potential manually specified sections
"""
# Get opname
opname = [o[8:-2] for o in re.findall(r'def SPV_\w+Op', op_def)]
assert len(opname) == 1, 'more than one ops in the same section!'
opname = opname[0]
# Get traits
op_tmpl_params = op_def.split('<', 1)[1].split('>', 1)[0].split(', ', 1)
if len(op_tmpl_params) == 1:
traits = ''
else:
traits = op_tmpl_params[1].strip('[]')
# Get custom assembly form
rest = op_def.split('### Custom assembly form\n')
assert len(rest) == 2, \
'{}: cannot find "### Custom assembly form"'.format(opname)
rest = rest[1].split(' let arguments = (ins')
assert len(rest) == 2, '{}: cannot find arguments'.format(opname)
assembly = rest[0].rstrip('}];\n')
# Get arguments
rest = rest[1].split(' let results = (outs')
assert len(rest) == 2, '{}: cannot find results'.format(opname)
args = rest[0].rstrip(');\n')
# Get results
rest = rest[1].split(');', 1)
assert len(rest) == 2, \
'{}: cannot find ");" ending results'.format(opname)
results = rest[0]
extras = rest[1].strip(' }\n')
if extras:
extras = '\n {}\n'.format(extras)
return {
# Prefix with 'Op' to make it consistent with SPIR-V spec
'opname': 'Op{}'.format(opname),
'traits': traits,
'assembly': assembly,
'arguments': args,
'results': results,
'extras': extras
}
def update_td_op_definitions(path, instructions, docs, filter_list):
"""Updates SPIRVOps.td with newly generated op definition.
Arguments:
- path: path to SPIRVOps.td
- instructions: SPIR-V JSON grammar for all instructions
- docs: SPIR-V HTML doc for all instructions
- filter_list: a list containing new opnames to include
Returns:
- A string containing all the TableGen op definitions
"""
with open(path, 'r') as f:
content = f.read()
# Split the file into chunks, each containing one op.
ops = content.split(AUTOGEN_OP_DEF_SEPARATOR)
header = ops[0]
footer = ops[-1]
ops = ops[1:-1]
# For each existing op, extract the manually-written sections out to retain
# them when re-generating the ops. Also append the existing ops to filter
# list.
op_info_dict = {}
for op in ops:
info_dict = extract_td_op_info(op)
opname = info_dict['opname']
op_info_dict[opname] = info_dict
filter_list.append(opname)
filter_list = sorted(list(set(filter_list)))
op_defs = []
for opname in filter_list:
# Find the grammar spec for this op
instruction = next(
inst for inst in instructions if inst['opname'] == opname)
op_defs.append(
get_op_definition(instruction, docs[opname],
op_info_dict.get(opname, {})))
# Substitute the old op definitions
op_defs = [header] + op_defs + [footer]
content = AUTOGEN_OP_DEF_SEPARATOR.join(op_defs)
with open(path, 'w') as f:
f.write(content)
if __name__ == '__main__':
import argparse
cli_parser = argparse.ArgumentParser(
description='Update SPIR-V dialect definitions using SPIR-V spec')
cli_parser.add_argument(
'--base-td-path',
dest='base_td_path',
type=str,
default=None,
help='Path to SPIRVBase.td')
cli_parser.add_argument(
'--op-td-path',
dest='op_td_path',
type=str,
default=None,
help='Path to SPIRVOps.td')
cli_parser.add_argument(
'--new-enum',
dest='new_enum',
type=str,
default=None,
help='SPIR-V enum to be added to SPIRVBase.td')
cli_parser.add_argument(
'--new-opcodes',
dest='new_opcodes',
type=str,
default=None,
nargs='*',
help='update SPIR-V opcodes in SPIRVBase.td')
cli_parser.add_argument(
'--new-inst',
dest='new_inst',
type=str,
default=None,
help='SPIR-V instruction to be added to SPIRVOps.td')
args = cli_parser.parse_args()
operand_kinds, instructions = get_spirv_grammar_from_json_spec()
# Define new enum attr
if args.new_enum is not None:
assert args.base_td_path is not None
filter_list = [args.new_enum] if args.new_enum else []
update_td_enum_attrs(args.base_td_path, operand_kinds, filter_list)
# Define new opcode
if args.new_opcodes is not None:
assert args.base_td_path is not None
update_td_opcodes(args.base_td_path, instructions, args.new_opcodes)
# Define new op
if args.new_inst is not None:
assert args.op_td_path is not None
filter_list = [args.new_inst] if args.new_inst else []
docs = get_spirv_doc_from_html_spec()
update_td_op_definitions(args.op_td_path, instructions, docs, filter_list)
print('Done. Note that this script just generates a template; ', end='')
print('please read the spec and update traits, arguments, and ', end='')
print('results accordingly.')
|
# standard-library imports for argument handling and shelling out to cleanEpisode
import sys, os
if len(sys.argv) > 6:
title = sys.argv[1]
s = sys.argv[2]
a = int(sys.argv[3])
b = int(sys.argv[4])
sourceLanguage = sys.argv[5]
targetLanguage = sys.argv[6]
else:
print("Please enter the title, season number, first episode number, last episode number, source language, and the target language")
exit(-1)
for n in range(a, b+1):
if s == "2":
prefix = title + "-" + s + "-" + str(n).zfill(3)
else:
prefix = title + "-" + s + "-" + str(n).zfill(2)
os.system("./cleanEpisode " + prefix + " " + sourceLanguage + " " + targetLanguage)
|
#!/usr/bin/python3
# Copyright 2020 Google LLC
# Copyright 2021 Fraunhofer FKIE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example of fuzzing in Python."""
import atheris
import sys
# This tells Atheris to instrument all functions in the `struct` and
# `example_library` modules.
with atheris.instrument_imports():
import struct
import example_library
@atheris.instrument_func # Instrument the TestOneInput function itself
def TestOneInput(data):
"""The entry point for our fuzzer.
This is a callback that will be repeatedly invoked with different arguments
after Fuzz() is called.
We translate the arbitrary byte string into a format our function being fuzzed
can understand, then call it.
Args:
data: Bytestring coming from the fuzzing engine.
"""
if len(data) != 4:
return # Input must be 4 byte integer.
number, = struct.unpack('<I', data)
example_library.CodeBeingFuzzed(number)
atheris.Setup(sys.argv, TestOneInput)
atheris.Fuzz()
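# To run this fuzzer (assumptions: `atheris` is installed, `example_library` is
# importable, and the filename below is a placeholder), libFuzzer flags are
# forwarded by atheris.Setup(), e.g.:
#   python3 example_fuzzer.py               # fuzz until interrupted
#   python3 example_fuzzer.py -runs=10000   # cap the number of iterations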
|
import unittest
from core import collect
class TestCollect(unittest.TestCase):
def test_if_we_get_viz_release(self):
mock_data = {
"name": "a",
"img": "img",
"link": "link",
"publisher": "publisher",
}
response = collect.get_viz()
assert response[0].keys() == mock_data.keys()
def test_if_we_get_yen_release(self):
mock_data = {
"name": "a",
"img": "img",
"link": "link",
"publisher": "publisher",
}
response = collect.get_yen()
assert response[0].keys() == mock_data.keys()
def test_if_we_get_sevenseas_release(self):
mock_data = {
"name": "a",
"img": "img",
"link": "link",
"publisher": "publisher",
}
response = collect.get_seven_seas()
assert response[0].keys() == mock_data.keys()
def test_if_we_get_darkhorse_release(self):
mock_data = {
"name": "a",
"img": "img",
"link": "link",
"publisher": "publisher",
}
response = collect.get_dark_horse()
assert response[0].keys() == mock_data.keys()
def test_if_we_get_kodansha_release(self):
mock_data = {
"name": "a",
"img": "img",
"link": "link",
"publisher": "publisher",
}
response = collect.get_kodansha()
assert response[0].keys() == mock_data.keys()
|
#!/usr/bin/python
#-*-coding:utf-8 -*-
# author: mld
# email: miradel51@126.com
# date : 2017/9/28
import sys
import string
import re
def de_tokenizestr(original_str):
after_de_tok = ""
original_str = original_str.replace("[ ","[")
original_str = original_str.replace(" ]","]")
original_str = original_str.replace(" !",'!')
original_str = original_str.replace(" % ","%")
original_str = original_str.replace(" # ","#")
original_str = original_str.replace(" @ ","@")
original_str = original_str.replace(" ~ ","~")
original_str = original_str.replace(" & ","&")
original_str = original_str.replace(" * ","*")
original_str = original_str.replace(" \" ","\"")
original_str = original_str.replace(" .",".")
original_str = original_str.replace(" ;",";")
original_str = original_str.replace(" ,",",")
original_str = original_str.replace(" ^","^")
original_str = original_str.replace("( ","(")
original_str = original_str.replace(" )",")")
original_str = original_str.replace("{ ","{")
original_str = original_str.replace(" >",">")
original_str = original_str.replace("< ","<")
original_str = original_str.replace(" ?","?")
original_str = original_str.replace(" }","}")
original_str = original_str.replace(" - ","-")
original_str = original_str.replace(" : ",":")
original_str = original_str.replace(" = ","=")
original_str = original_str.replace(" + ","+")
after_de_tok = original_str
return after_de_tok
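# Examples of the expected behavior, derived from the replacements above:
#   de_tokenizestr("hello , world ( test )")  ->  "hello, world (test)"
#   de_tokenizestr("a = b + c")               ->  "a=b+c"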
if __name__ == '__main__':
ori_ = sys.argv[1]
de_tok_ = sys.argv[2]
ori_file = open(ori_,"r")
de_tok_file = open(de_tok_,"w")
context = ""
for eachline in ori_file:
context = eachline.strip()
# De-tokenize the current line (re-attach punctuation symbols to the surrounding words)
context = de_tokenizestr(context)
de_tok_file.write(context)
de_tok_file.write("\n")
ori_file.close()
de_tok_file.close()
|
#!/usr/bin/env python3
"""
Command line tool to publish balls on the /ball_in_image topic
"""
import rospy
from humanoid_league_msgs.msg import BallInImage, BallInImageArray
import sys
import signal
def _signal_term_handler(signal, frame):
rospy.logerr('User Keyboard interrupt')
sys.exit(0)
if __name__ == "__main__":
# handle keyboard interrupts
signal.signal(signal.SIGINT, _signal_term_handler)
rospy.init_node("ball_tester")
pub = rospy.Publisher("balls_in_image", BallInImageArray, queue_size=10)
while True:
x_str = input("x:")
try:
x = int(x_str)
except ValueError:
print("try again")
continue
y_str = input("y:")
try:
y = int(y_str)
except ValueError:
print("try again")
continue
ba = BallInImageArray()
ba.header.stamp = rospy.get_rostime() - rospy.Duration(0.2)
ball = BallInImage()
ball.confidence = 1
ball.center.x = x
ball.center.y = y
ball.diameter = 0.13
ba.candidates.append(ball)
pub.publish(ba)
|
#!/usr/bin/env python #
# ------------------------------------------------------------------------------------------------------#
# Created by "Thieu Nguyen" at 02:05, 15/12/2019 #
# #
# Email: nguyenthieu2102@gmail.com #
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 #
# Github: https://github.com/thieunguyen5991 #
#-------------------------------------------------------------------------------------------------------#
|
import json
from django.core.exceptions import ObjectDoesNotExist
import mock
from curling.lib import HttpClientError
from mock import ANY
from nose.tools import eq_, ok_, raises
from pyquery import PyQuery as pq
import amo
import amo.tests
from amo.urlresolvers import reverse
from addons.models import (Addon, AddonCategory, AddonDeviceType,
AddonPremium, AddonUpsell, AddonUser, Category)
from constants.payments import (PAYMENT_METHOD_ALL,
PAYMENT_METHOD_CARD,
PAYMENT_METHOD_OPERATOR)
from mkt.constants.payments import ACCESS_PURCHASE, ACCESS_SIMULATE
from mkt.constants.regions import ALL_REGION_IDS
from market.models import Price
from users.models import UserProfile
import mkt
from mkt.developers.models import (AddonPaymentAccount, PaymentAccount,
SolitudeSeller, uri_to_pk, UserInappKey)
from mkt.site.fixtures import fixture
from mkt.webapps.models import AddonExcludedRegion as AER, ContentRating
# Id without any significance but to be different of 1.
TEST_PACKAGE_ID = 2
def setup_payment_account(app, user, uid='uid', package_id=TEST_PACKAGE_ID):
seller = SolitudeSeller.objects.create(user=user, uuid=uid)
payment = PaymentAccount.objects.create(user=user, solitude_seller=seller,
agreed_tos=True, seller_uri=uid,
uri=uid,
bango_package_id=package_id)
return AddonPaymentAccount.objects.create(addon=app,
product_uri='/path/to/%s/' % app.pk, account_uri=payment.uri,
payment_account=payment)
class InappTest(amo.tests.TestCase):
def setUp(self):
self.create_switch('in-app-payments')
self.app = Addon.objects.get(pk=337141)
self.app.update(premium_type=amo.ADDON_FREE_INAPP)
self.user = UserProfile.objects.get(pk=31337)
self.other = UserProfile.objects.get(pk=999)
self.login(self.user)
self.account = setup_payment_account(self.app, self.user)
self.url = reverse('mkt.developers.apps.in_app_config',
args=[self.app.app_slug])
def set_mocks(self, solitude):
get = mock.Mock()
get.get_object_or_404.return_value = {
'seller_product': '/path/to/prod-pk/'
}
post = mock.Mock()
post.return_value = get
solitude.api.bango.product = post
get = mock.Mock()
get.get_object_or_404.return_value = {'resource_pk': 'some-key',
'secret': 'shhh!'}
post = mock.Mock()
post.return_value = get
solitude.api.generic.product = post
@mock.patch('mkt.developers.views_payments.client')
class TestInappConfig(InappTest):
fixtures = fixture('webapp_337141', 'user_999')
@raises(ObjectDoesNotExist)
def test_not_seller(self, solitude):
post = mock.Mock()
post.side_effect = ObjectDoesNotExist
solitude.api.generic.product = post
eq_(self.client.get(self.url).status_code, 404)
def test_key_generation(self, solitude):
self.set_mocks(solitude)
self.client.post(self.url, {})
args = solitude.api.generic.product().patch.call_args
assert 'secret' in args[1]['data']
def test_logged_out(self, solitude):
self.client.logout()
self.assertLoginRequired(self.client.get(self.url))
def test_different(self, solitude):
self.login(self.other)
eq_(self.client.get(self.url).status_code, 403)
def test_developer(self, solitude):
self.login(self.other)
AddonUser.objects.create(addon=self.app, user=self.other,
role=amo.AUTHOR_ROLE_DEV)
# Developer can read, but not reset.
eq_(self.client.get(self.url).status_code, 200)
eq_(self.client.post(self.url).status_code, 403)
def test_not_inapp(self, solitude):
self.app.update(premium_type=amo.ADDON_PREMIUM)
eq_(self.client.get(self.url).status_code, 302)
def test_no_account(self, solitude):
self.app.app_payment_account.delete()
eq_(self.client.get(self.url).status_code, 302)
@mock.patch('mkt.developers.views_payments.client')
class TestInappSecret(InappTest):
fixtures = fixture('webapp_337141', 'user_999')
def setUp(self):
super(TestInappSecret, self).setUp()
self.url = reverse('mkt.developers.apps.in_app_secret',
args=[self.app.app_slug])
def test_show_secret(self, solitude):
self.set_mocks(solitude)
resp = self.client.get(self.url)
eq_(resp.content, 'shhh!')
pk = uri_to_pk(self.account.product_uri)
solitude.api.bango.product.assert_called_with(pk)
solitude.api.generic.product.assert_called_with('prod-pk')
def test_logged_out(self, solitude):
self.client.logout()
self.assertLoginRequired(self.client.get(self.url))
def test_different(self, solitude):
self.client.login(username='regular@mozilla.com', password='password')
eq_(self.client.get(self.url).status_code, 403)
def test_developer(self, solitude):
self.set_mocks(solitude)
self.login(self.other)
AddonUser.objects.create(addon=self.app, user=self.other,
role=amo.AUTHOR_ROLE_DEV)
resp = self.client.get(self.url)
eq_(resp.content, 'shhh!')
class InappKeysTest(InappTest):
fixtures = fixture('webapp_337141', 'user_999')
def setUp(self):
super(InappKeysTest, self).setUp()
self.create_switch('in-app-sandbox')
self.url = reverse('mkt.developers.apps.in_app_keys')
self.seller_uri = '/seller/1/'
self.product_pk = 2
def setup_solitude(self, solitude):
solitude.api.generic.seller.post.return_value = {
'resource_uri': self.seller_uri}
solitude.api.generic.product.post.return_value = {
'resource_pk': self.product_pk}
@mock.patch('mkt.developers.models.client')
class TestInappKeys(InappKeysTest):
def test_logged_out(self, solitude):
self.client.logout()
self.assertLoginRequired(self.client.get(self.url))
def test_no_key(self, solitude):
res = self.client.get(self.url)
eq_(res.status_code, 200)
eq_(res.context['key'], None)
def test_key_generation(self, solitude):
self.setup_solitude(solitude)
res = self.client.post(self.url)
ok_(res['Location'].endswith(self.url), res)
ok_(solitude.api.generic.seller.post.called)
ok_(solitude.api.generic.product.post.called)
key = UserInappKey.objects.get()
eq_(key.solitude_seller.resource_uri, self.seller_uri)
eq_(key.seller_product_pk, self.product_pk)
m = solitude.api.generic.product.post.mock_calls
eq_(m[0][2]['data']['access'], ACCESS_SIMULATE)
def test_reset(self, solitude):
self.setup_solitude(solitude)
key = UserInappKey.create(self.user)
product = mock.Mock()
solitude.api.generic.product.return_value = product
self.client.post(self.url)
product.patch.assert_called_with(data={'secret': ANY})
solitude.api.generic.product.assert_called_with(key.seller_product_pk)
@mock.patch('mkt.developers.models.client')
class TestInappKeySecret(InappKeysTest):
def setUp(self):
super(TestInappKeySecret, self).setUp()
def setup_objects(self, solitude):
self.setup_solitude(solitude)
key = UserInappKey.create(self.user)
self.url = reverse('mkt.developers.apps.in_app_key_secret',
args=[key.pk])
def test_logged_out(self, solitude):
self.setup_objects(solitude)
self.client.logout()
self.assertLoginRequired(self.client.get(self.url))
def test_different(self, solitude):
self.setup_objects(solitude)
self.login(self.other)
eq_(self.client.get(self.url).status_code, 403)
def test_secret(self, solitude):
self.setup_objects(solitude)
secret = 'not telling'
product = mock.Mock()
product.get.return_value = {'secret': secret}
solitude.api.generic.product.return_value = product
res = self.client.get(self.url)
eq_(res.status_code, 200)
eq_(res.content, secret)
class TestPayments(amo.tests.TestCase):
fixtures = fixture('webapp_337141', 'user_999', 'group_admin',
'user_admin', 'user_admin_group', 'prices')
def setUp(self):
self.webapp = self.get_webapp()
AddonDeviceType.objects.create(
addon=self.webapp, device_type=amo.DEVICE_GAIA.id)
self.url = self.webapp.get_dev_url('payments')
self.user = UserProfile.objects.get(pk=31337)
self.other = UserProfile.objects.get(pk=999)
self.admin = UserProfile.objects.get(email='admin@mozilla.com')
# Default to logging in as the app owner.
self.login(self.user)
self.price = Price.objects.filter()[0]
self.patch = mock.patch('mkt.developers.models.client')
self.sol = self.patch.start()
def tearDown(self):
self.patch.stop()
def get_webapp(self):
return Addon.objects.get(pk=337141)
def get_region_list(self):
return list(AER.objects.values_list('region', flat=True))
def get_postdata(self, extension):
base = {'regions': self.get_region_list(),
'free_platforms': ['free-%s' % dt.class_name for dt in
self.webapp.device_types],
'paid_platforms': ['paid-%s' % dt.class_name for dt in
self.webapp.device_types]}
base.update(extension)
return base
def test_free(self):
res = self.client.post(
self.url, self.get_postdata({'toggle-paid': 'free'}), follow=True)
eq_(self.get_webapp().premium_type, amo.ADDON_FREE)
eq_(res.context['is_paid'], False)
def test_premium_passes(self):
self.webapp.update(premium_type=amo.ADDON_FREE)
res = self.client.post(
self.url, self.get_postdata({'toggle-paid': 'paid'}), follow=True)
eq_(self.get_webapp().premium_type, amo.ADDON_PREMIUM)
eq_(res.context['is_paid'], True)
def test_check_api_url_in_context(self):
self.webapp.update(premium_type=amo.ADDON_FREE)
res = self.client.get(self.url)
eq_(res.context['api_pricelist_url'],
reverse('api_dispatch_list', kwargs={'resource_name': 'prices',
'api_name': 'webpay'}))
def test_regions_display_free(self):
self.webapp.update(premium_type=amo.ADDON_FREE)
res = self.client.get(self.url)
pqr = pq(res.content)
eq_(len(pqr('#regions-island')), 1)
eq_(len(pqr('#paid-regions-island')), 0)
def test_regions_display_premium(self):
self.webapp.update(premium_type=amo.ADDON_PREMIUM)
res = self.client.get(self.url)
pqr = pq(res.content)
eq_(len(pqr('#regions-island')), 0)
eq_(len(pqr('#paid-regions-island')), 1)
def test_free_with_in_app_tier_id_in_content(self):
price_tier_zero = Price.objects.create(price='0.00')
self.webapp.update(premium_type=amo.ADDON_PREMIUM)
res = self.client.get(self.url)
pqr = pq(res.content)
eq_(len(pqr('#region-list[data-tier-zero-id]')), 1)
eq_(int(pqr('#region-list').attr(
'data-tier-zero-id')), price_tier_zero.pk)
def test_not_applicable_data_attr_in_content(self):
self.webapp.update(premium_type=amo.ADDON_PREMIUM)
res = self.client.get(self.url)
pqr = pq(res.content)
eq_(len(pqr('#region-list[data-not-applicable-msg]')), 1)
def test_pay_method_ids_in_context(self):
self.webapp.update(premium_type=amo.ADDON_PREMIUM)
res = self.client.get(self.url)
self.assertSetEqual(res.context['payment_methods'].keys(),
[PAYMENT_METHOD_ALL, PAYMENT_METHOD_CARD,
PAYMENT_METHOD_OPERATOR])
def test_free_with_in_app_deletes_upsell(self):
self.make_premium(self.webapp)
new_upsell_app = Addon.objects.create(type=self.webapp.type,
status=self.webapp.status, name='upsell-%s' % self.webapp.id,
premium_type=amo.ADDON_FREE)
new_upsell = AddonUpsell(premium=self.webapp)
new_upsell.free = new_upsell_app
new_upsell.save()
assert self.webapp.upsold is not None
self.client.post(
self.url, self.get_postdata({'price': 'free',
'allow_inapp': 'True',
'regions': ALL_REGION_IDS}),
follow=True)
eq_(self.get_webapp().upsold, None)
eq_(AddonPremium.objects.all().count(), 0)
def test_premium_in_app_passes(self):
self.webapp.update(premium_type=amo.ADDON_FREE)
res = self.client.post(
self.url, self.get_postdata({'toggle-paid': 'paid'}))
self.assert3xx(res, self.url)
res = self.client.post(
self.url, self.get_postdata({'allow_inapp': True,
'price': self.price.pk,
'regions': ALL_REGION_IDS}))
self.assert3xx(res, self.url)
eq_(self.get_webapp().premium_type, amo.ADDON_PREMIUM_INAPP)
def test_later_then_free(self):
self.webapp.update(premium_type=amo.ADDON_PREMIUM,
status=amo.STATUS_NULL,
highest_status=amo.STATUS_PENDING)
self.make_premium(self.webapp)
res = self.client.post(
self.url, self.get_postdata({'toggle-paid': 'free',
'price': self.price.pk}))
self.assert3xx(res, self.url)
eq_(self.get_webapp().status, amo.STATUS_PENDING)
eq_(AddonPremium.objects.all().count(), 0)
def test_premium_price_initial_already_set(self):
Price.objects.create(price='0.00') # Make a free tier for measure.
self.make_premium(self.webapp)
r = self.client.get(self.url)
eq_(pq(r.content)('select[name=price] option[selected]').attr('value'),
str(self.webapp.premium.price.id))
def test_premium_price_initial_use_default(self):
Price.objects.create(price='10.00') # Make one more tier.
self.webapp.update(premium_type=amo.ADDON_FREE)
res = self.client.post(
self.url, self.get_postdata({'toggle-paid': 'paid'}), follow=True)
pqr = pq(res.content)
eq_(pqr('select[name=price] option[selected]').attr('value'),
str(Price.objects.get(price='0.99').id))
def test_starting_with_free_inapp_has_free_selected(self):
self.webapp.update(premium_type=amo.ADDON_FREE_INAPP)
res = self.client.get(self.url)
pqr = pq(res.content)
eq_(pqr('select[name=price] option[selected]').attr('value'), 'free')
def test_made_free_inapp_has_free_selected(self):
self.make_premium(self.webapp)
res = self.client.post(
self.url, self.get_postdata({'price': 'free',
'allow_inapp': 'True'}), follow=True)
pqr = pq(res.content)
eq_(pqr('select[name=price] option[selected]').attr('value'), 'free')
def test_made_free_inapp_then_free(self):
self.webapp.update(premium_type=amo.ADDON_PREMIUM)
self.make_premium(self.webapp)
self.client.post(
self.url, self.get_postdata({'price': 'free',
'allow_inapp': 'True',
'regions': ALL_REGION_IDS}))
eq_(self.get_webapp().premium_type, amo.ADDON_FREE_INAPP)
self.client.post(
self.url, self.get_postdata({'toggle-paid': 'free',
'regions': ALL_REGION_IDS}))
eq_(self.get_webapp().premium_type, amo.ADDON_FREE)
def test_free_with_inapp_without_account_is_incomplete(self):
self.webapp.update(premium_type=amo.ADDON_FREE)
# Toggle to paid
self.client.post(
self.url, self.get_postdata({'toggle-paid': 'paid'}))
res = self.client.post(
self.url, self.get_postdata({'price': 'free',
'allow_inapp': 'True',
'regions': ALL_REGION_IDS}))
self.assert3xx(res, self.url)
eq_(self.get_webapp().status, amo.STATUS_NULL)
eq_(AddonPremium.objects.all().count(), 0)
def test_paid_app_without_account_is_incomplete(self):
self.webapp.update(premium_type=amo.ADDON_FREE)
# Toggle to paid
self.client.post(
self.url, self.get_postdata({'toggle-paid': 'paid'}))
res = self.client.post(
self.url, self.get_postdata({'price': self.price.pk,
'allow_inapp': 'False',
'regions': ALL_REGION_IDS}))
self.assert3xx(res, self.url)
eq_(self.get_webapp().status, amo.STATUS_NULL)
def setup_payment_acct(self, make_owner, user=None, bango_id=123):
# Set up Solitude return values.
api = self.sol.api
api.generic.product.get_object.side_effect = ObjectDoesNotExist
api.generic.product.post.return_value = {'resource_uri': 'gpuri'}
api.bango.product.get_object.side_effect = ObjectDoesNotExist
api.bango.product.post.return_value = {
'resource_uri': 'bpruri', 'bango_id': bango_id}
if not user:
user = self.user
amo.set_user(user)
if make_owner:
# Make owner
AddonUser.objects.create(addon=self.webapp,
user=user, role=amo.AUTHOR_ROLE_OWNER)
# Set up an existing bank account.
seller = SolitudeSeller.objects.create(
resource_uri='/path/to/sel', user=user, uuid='uuid-%s' % user.pk)
acct = PaymentAccount.objects.create(
user=user, uri='asdf-%s' % user.pk, name='test', inactive=False,
seller_uri='suri-%s' % user.pk, solitude_seller=seller,
bango_package_id=123, agreed_tos=True)
return acct, api, user
def is_owner(self, user):
return (self.webapp.authors.filter(user=user,
addonuser__role=amo.AUTHOR_ROLE_OWNER).exists())
def test_associate_acct_to_app_free_inapp(self):
acct, api, user = self.setup_payment_acct(make_owner=True)
# Must be an app owner to change this.
assert self.is_owner(user)
# Associate account with app.
self.make_premium(self.webapp)
res = self.client.post(
self.url, self.get_postdata({'price': 'free',
'allow_inapp': 'True',
'regions': ALL_REGION_IDS,
'accounts': acct.pk}), follow=True)
self.assertNoFormErrors(res)
eq_(res.status_code, 200)
eq_(self.webapp.app_payment_account.payment_account.pk, acct.pk)
eq_(AddonPremium.objects.all().count(), 0)
def test_associate_acct_to_app(self):
self.make_premium(self.webapp, price=self.price.price)
acct, api, user = self.setup_payment_acct(make_owner=True)
# Must be an app owner to change this.
assert self.is_owner(user)
# Associate account with app.
res = self.client.post(
self.url, self.get_postdata({'price': self.price.pk,
'accounts': acct.pk,
'regions': ALL_REGION_IDS}),
follow=True)
eq_(api.bango.premium.post.call_count, 1)
self.assertNoFormErrors(res)
eq_(res.status_code, 200)
eq_(self.webapp.app_payment_account.payment_account.pk, acct.pk)
kw = api.bango.product.post.call_args[1]['data']
ok_(kw['secret'], kw)
kw = api.generic.product.post.call_args[1]['data']
eq_(kw['access'], ACCESS_PURCHASE)
def test_associate_acct_to_app_when_not_owner(self):
self.make_premium(self.webapp, price=self.price.price)
self.login(self.other)
acct, api, user = self.setup_payment_acct(make_owner=False,
user=self.other)
# Check we're not an owner before we start.
assert not self.is_owner(user)
# Attempt to associate account with app as non-owner.
res = self.client.post(
self.url, self.get_postdata({'accounts': acct.pk}), follow=True)
# Non-owner posts are forbidden.
eq_(res.status_code, 403)
# Payment account shouldn't be set as we're not the owner.
assert not (AddonPaymentAccount.objects
.filter(addon=self.webapp).exists())
def test_associate_acct_to_app_when_not_owner_and_an_admin(self):
self.make_premium(self.webapp, self.price.price)
self.login(self.admin)
acct, api, user = self.setup_payment_acct(make_owner=False,
user=self.admin)
# Check we're not an owner before we start.
assert not self.is_owner(user)
assert not (AddonPaymentAccount.objects
.filter(addon=self.webapp).exists())
# Attempt to associate account with app as non-owner admin.
res = self.client.post(
self.url, self.get_postdata({'accounts': acct.pk,
'price': self.price.pk,
'regions': ALL_REGION_IDS}),
follow=True)
self.assertFormError(res, 'bango_account_list_form', 'accounts',
[u'You are not permitted to change payment '
'accounts.'])
# Payment account shouldn't be set as we're not the owner.
assert not (AddonPaymentAccount.objects
.filter(addon=self.webapp).exists())
pqr = pq(res.content)
# Payment field should be disabled.
eq_(len(pqr('#id_accounts[disabled]')), 1)
# There's no existing associated account.
eq_(len(pqr('.current-account')), 0)
def test_associate_acct_to_app_when_admin_and_owner_acct_exists(self):
self.make_premium(self.webapp, price=self.price.price)
owner_acct, api, owner_user = self.setup_payment_acct(make_owner=True)
assert self.is_owner(owner_user)
res = self.client.post(
self.url, self.get_postdata({'accounts': owner_acct.pk,
'price': self.price.pk,
'regions': ALL_REGION_IDS}),
follow=True)
assert (AddonPaymentAccount.objects
.filter(addon=self.webapp).exists())
self.login(self.admin)
admin_acct, api, admin_user = self.setup_payment_acct(make_owner=False,
user=self.admin)
# Check we're not an owner before we start.
assert not self.is_owner(admin_user)
res = self.client.post(
self.url, self.get_postdata({'accounts': admin_acct.pk,
'price': self.price.pk,
'regions': ALL_REGION_IDS}),
follow=True)
self.assertFormError(res, 'bango_account_list_form', 'accounts',
[u'You are not permitted to change payment '
'accounts.'])
def test_one_owner_and_a_second_one_sees_selected_plus_own_accounts(self):
self.make_premium(self.webapp, price=self.price.price)
owner_acct, api, owner = self.setup_payment_acct(make_owner=True)
# Should be an owner.
assert self.is_owner(owner)
res = self.client.post(
self.url, self.get_postdata({'accounts': owner_acct.pk,
'price': self.price.pk,
'regions': ALL_REGION_IDS}),
follow=True)
assert (AddonPaymentAccount.objects
.filter(addon=self.webapp).exists())
# Login as other user.
self.login(self.other)
owner_acct2, api, owner2 = self.setup_payment_acct(make_owner=True,
user=self.other)
assert self.is_owner(owner2)
# Should see the saved account plus 2nd owner's own account select
# and be able to save their own account but not the other owners.
res = self.client.get(self.url)
eq_(res.status_code, 200)
pqr = pq(res.content)
# Check we have just our account option present + '----'.
eq_(len(pqr('#id_accounts option')), 2)
eq_(len(pqr('#id_account[disabled]')), 0)
eq_(pqr('.current-account').text(), unicode(owner_acct))
res = self.client.post(
self.url, self.get_postdata({'accounts': owner_acct2.pk,
'price': self.price.pk,
'regions': ALL_REGION_IDS}),
follow=True)
eq_(res.status_code, 200)
self.assertNoFormErrors(res)
pqr = pq(res.content)
eq_(len(pqr('.current-account')), 0)
eq_(pqr('#id_accounts option[selected]').text(), unicode(owner_acct2))
# Now there should just be our account.
eq_(len(pqr('#id_accounts option')), 1)
def test_existing_account_should_be_disabled_for_non_owner(self):
self.make_premium(self.webapp, price=self.price.price)
acct, api, user = self.setup_payment_acct(make_owner=True)
# Must be an app owner to change this.
assert self.is_owner(user)
# Associate account with app.
res = self.client.post(
self.url, self.get_postdata({'accounts': acct.pk,
'price': self.price.pk,
'regions': ALL_REGION_IDS}),
follow=True)
amo.set_user(self.other)
# Make this user a dev so they have access to the payments page.
AddonUser.objects.create(addon=self.webapp,
user=self.other, role=amo.AUTHOR_ROLE_DEV)
self.login(self.other)
# Make sure not an owner.
assert not self.is_owner(self.other)
res = self.client.get(self.url)
eq_(res.status_code, 200)
pqr = pq(res.content)
# No accounts setup.
eq_(len(pqr('.no-accounts')), 1)
# Currently associated account should be displayed separately.
eq_(pqr('.current-account').text(), unicode(acct))
def test_existing_account_should_be_disabled_for_non_owner_admin(self):
self.make_premium(self.webapp, price=self.price.price)
# Login as regular user
self.login(self.other)
owner_acct, api, user = self.setup_payment_acct(make_owner=True,
user=self.other)
# Must be an app owner to change this.
assert self.is_owner(self.other)
# Associate account with app.
res = self.client.post(self.url,
self.get_postdata({'accounts': owner_acct.pk,
'price': self.price.pk,
'regions': ALL_REGION_IDS}),
follow=True)
self.assertNoFormErrors(res)
# Login as admin.
self.login(self.admin)
# Create an account as an admin.
admin_acct, api, admin_user = self.setup_payment_acct(make_owner=False,
user=self.admin)
# Make sure not an owner.
assert not self.is_owner(self.admin)
res = self.client.get(self.url)
eq_(res.status_code, 200)
pqr = pq(res.content)
# Payment field should be disabled.
eq_(len(pqr('#id_accounts[disabled]')), 1)
# Currently associated account should be displayed separately.
eq_(pqr('.current-account').text(), unicode(owner_acct))
def test_deleted_payment_accounts_switch_to_incomplete_apps(self):
self.make_premium(self.webapp, price=self.price.price)
self.login(self.user)
addon_account = setup_payment_account(self.webapp, self.user)
eq_(self.webapp.status, amo.STATUS_PUBLIC)
self.client.post(reverse('mkt.developers.bango.delete_payment_account',
args=[addon_account.payment_account.pk]))
eq_(self.webapp.reload().status, amo.STATUS_NULL)
def setup_bango_portal(self):
self.create_switch('bango-portal')
self.user = UserProfile.objects.get(pk=31337)
self.webapp.update(premium_type=amo.ADDON_PREMIUM)
self.login(self.user)
self.account = setup_payment_account(self.webapp, self.user)
self.portal_url = self.webapp.get_dev_url(
'payments.bango_portal_from_addon')
def test_bango_portal_links(self):
payments_url = self.webapp.get_dev_url('payments')
res = self.client.get(payments_url)
account_template = self.extract_script_template(
res.content, '#account-row-template')
eq_(len(account_template('.portal-account')), 0)
self.create_switch('bango-portal', db=True)
res = self.client.get(payments_url)
account_template = self.extract_script_template(
res.content, '#account-row-template')
eq_(len(account_template('.portal-account')), 1)
@mock.patch('mkt.developers.views_payments.client.api')
def test_bango_portal_redirect(self, api):
self.setup_bango_portal()
authentication_token = u'D0A44686-D4A3-4B2F-9BEB-5E4975E35192'
api.bango.login.post.return_value = {
'person_id': 600925,
'email_address': u'admin@place.com',
'authentication_token': authentication_token,
}
assert self.is_owner(self.user)
res = self.client.get(self.portal_url)
eq_(res.status_code, 204)
eq_(api.bango.login.post.call_args[0][0]['packageId'], TEST_PACKAGE_ID)
redirect_url = res['Location']
assert authentication_token in redirect_url, redirect_url
assert 'emailAddress=admin%40place.com' in redirect_url, redirect_url
@mock.patch('mkt.developers.views_payments.client.api')
def test_bango_portal_redirect_api_error(self, api):
self.setup_bango_portal()
err = {'errors': 'Something went wrong.'}
api.bango.login.post.side_effect = HttpClientError(content=err)
res = self.client.get(self.portal_url)
eq_(res.status_code, 400)
eq_(json.loads(res.content), err)
def test_bango_portal_redirect_role_error(self):
# Checks that only the owner can access the page (vs. developers).
self.setup_bango_portal()
addon_user = self.user.addonuser_set.all()[0]
addon_user.role = amo.AUTHOR_ROLE_DEV
addon_user.save()
assert not self.is_owner(self.user)
res = self.client.get(self.portal_url)
eq_(res.status_code, 403)
def test_bango_portal_redirect_permission_error(self):
# Checks that the owner of another app can't access the page.
self.setup_bango_portal()
self.login(self.other)
other_webapp = Addon.objects.create(type=self.webapp.type,
status=self.webapp.status, name='other-%s' % self.webapp.id,
premium_type=amo.ADDON_PREMIUM)
AddonUser.objects.create(addon=other_webapp,
user=self.other, role=amo.AUTHOR_ROLE_OWNER)
res = self.client.get(self.portal_url)
eq_(res.status_code, 403)
def test_bango_portal_redirect_solitude_seller_error(self):
# Checks that the owner has a SolitudeSeller instance for this app.
self.setup_bango_portal()
assert self.is_owner(self.user)
(self.webapp.app_payment_account.payment_account.
solitude_seller.update(user=self.other))
res = self.client.get(self.portal_url)
eq_(res.status_code, 403)
class TestRegions(amo.tests.TestCase):
fixtures = ['base/apps', 'base/users', 'webapps/337141-steamcube']
def setUp(self):
self.webapp = self.get_webapp()
AddonDeviceType.objects.create(
addon=self.webapp, device_type=amo.DEVICE_GAIA.id)
self.url = self.webapp.get_dev_url('payments')
self.username = 'admin@mozilla.com'
assert self.client.login(username=self.username, password='password')
self.patch = mock.patch('mkt.developers.models.client')
self.sol = self.patch.start()
def tearDown(self):
self.patch.stop()
def get_webapp(self):
return Addon.objects.get(pk=337141)
def get_dict(self, **kwargs):
extension = {'regions': mkt.regions.ALL_REGION_IDS,
'other_regions': 'on',
'free_platforms': ['free-%s' % dt.class_name for dt in
self.webapp.device_types]}
extension.update(kwargs)
return extension
def get_excluded_ids(self):
return sorted(AER.objects.filter(addon=self.webapp)
.values_list('region', flat=True))
def test_edit_all_regions_are_not_excluded(self):
# Keep the category around for good measure.
Category.objects.create(type=amo.ADDON_WEBAPP, slug='games')
r = self.client.post(self.url, self.get_dict())
self.assertNoFormErrors(r)
eq_(AER.objects.count(), 0)
def test_games_form_disabled(self):
games = Category.objects.create(type=amo.ADDON_WEBAPP, slug='games')
AddonCategory.objects.create(addon=self.webapp, category=games)
r = self.client.get(self.url, self.get_dict())
self.assertNoFormErrors(r)
td = pq(r.content)('#regions')
eq_(td.find('div[data-disabled-regions]')
.attr('data-disabled-regions'),
'[%d, %d]' % (mkt.regions.BR.id, mkt.regions.DE.id))
eq_(td.find('.note.disabled-regions').length, 1)
def test_games_form_enabled_with_content_rating(self):
for region in (mkt.regions.BR, mkt.regions.DE):
rb = region.ratingsbodies[0]
ContentRating.objects.create(
addon=self.webapp, ratings_body=rb.id, rating=rb.ratings[0].id)
games = Category.objects.create(type=amo.ADDON_WEBAPP, slug='games')
AddonCategory.objects.create(addon=self.webapp, category=games)
r = self.client.get(self.url)
td = pq(r.content)('#regions')
eq_(td.find('div[data-disabled-regions]')
.attr('data-disabled-regions'), '[]')
eq_(td.find('.note.disabled-regions').length, 0)
def test_brazil_other_cats_form_enabled(self):
r = self.client.get(self.url)
td = pq(r.content)('#regions')
eq_(td.find('div[data-disabled-regions]')
.attr('data-disabled-regions'), '[]')
eq_(td.find('.note.disabled-regions').length, 0)
class PaymentsBase(amo.tests.TestCase):
fixtures = fixture('user_editor', 'user_999')
def setUp(self):
self.user = UserProfile.objects.get(pk=999)
self.login(self.user)
self.account = self.create()
def create(self):
# If user is defined on SolitudeSeller, why do we also need it on
# PaymentAccount? Fewer JOINs.
seller = SolitudeSeller.objects.create(user=self.user)
return PaymentAccount.objects.create(user=self.user,
solitude_seller=seller,
uri='/bango/package/123',
name="cvan's cnotes",
agreed_tos=True)
class TestPaymentAccountsAdd(PaymentsBase):
# TODO: this test provides bare coverage and might need to be expanded.
def setUp(self):
super(TestPaymentAccountsAdd, self).setUp()
self.url = reverse('mkt.developers.bango.add_payment_account')
def test_login_required(self):
self.client.logout()
self.assertLoginRequired(self.client.post(self.url, data={}))
@mock.patch('mkt.developers.models.client')
def test_create(self, client):
res = self.client.post(self.url, data={
'bankAccountPayeeName': 'name',
'companyName': 'company',
'vendorName': 'vendor',
'financeEmailAddress': 'a@a.com',
'adminEmailAddress': 'a@a.com',
'supportEmailAddress': 'a@a.com',
'address1': 'address 1',
'addressCity': 'city',
'addressState': 'state',
'addressZipCode': 'zip',
'addressPhone': '123',
'countryIso': 'BRA',
'currencyIso': 'EUR',
'bankAccountNumber': '123',
'bankAccountCode': '123',
'bankName': 'asd',
'bankAddress1': 'address 2',
'bankAddressZipCode': '123',
'bankAddressIso': 'BRA',
'account_name': 'account'
})
output = json.loads(res.content)
ok_('pk' in output)
ok_('agreement-url' in output)
eq_(PaymentAccount.objects.count(), 2)
class TestPaymentAccounts(PaymentsBase):
def setUp(self):
super(TestPaymentAccounts, self).setUp()
self.url = reverse('mkt.developers.bango.payment_accounts')
def test_login_required(self):
self.client.logout()
self.assertLoginRequired(self.client.get(self.url))
def test_mine(self):
res = self.client.get(self.url)
eq_(res.status_code, 200)
output = json.loads(res.content)
eq_(output[0]['id'], self.account.pk)
ok_('&#39;' in output[0]['name'])  # Was jinja2 escaped.
class TestPaymentPortal(PaymentsBase):
def setUp(self):
super(TestPaymentPortal, self).setUp()
self.create_switch('bango-portal')
self.app_slug = 'app-slug'
def test_with_app_slug(self):
url = reverse('mkt.developers.bango.payment_accounts')
res = self.client.get(url, {'app-slug': self.app_slug})
eq_(res.status_code, 200)
output = json.loads(res.content)
eq_(output[0]['portal-url'],
reverse('mkt.developers.apps.payments.bango_portal_from_addon',
args=[self.app_slug]))
def test_without_app_slug(self):
url = reverse('mkt.developers.bango.payment_accounts')
res = self.client.get(url)
eq_(res.status_code, 200)
output = json.loads(res.content)
ok_('portal-url' not in output[0])
class TestPaymentAccount(PaymentsBase):
def setUp(self):
super(TestPaymentAccount, self).setUp()
self.url = reverse('mkt.developers.bango.payment_account',
args=[self.account.pk])
def test_login_required(self):
self.client.logout()
self.assertLoginRequired(self.client.get(self.url))
@mock.patch('mkt.developers.models.client')
def test_get(self, client):
package = mock.Mock()
package.get.return_value = {'full': {'vendorName': 'testval'}}
client.api.bango.package.return_value = package
res = self.client.get(self.url)
client.api.bango.package.assert_called_with('123')
eq_(res.status_code, 200)
output = json.loads(res.content)
eq_(output['account_name'], self.account.name)
assert 'vendorName' in output, (
'Details from Bango not getting merged in: %s' % output)
eq_(output['vendorName'], 'testval')
class TestPaymentAgreement(PaymentsBase):
def setUp(self):
super(TestPaymentAgreement, self).setUp()
self.url = reverse('mkt.developers.bango.agreement',
args=[self.account.pk])
def test_anon(self):
self.client.logout()
self.assertLoginRequired(self.client.get(self.url))
@mock.patch('mkt.developers.views_payments.client.api')
def test_get(self, api):
api.bango.sbi.agreement.get_object.return_value = {
'text': 'blah', 'valid': '2010-08-31T00:00:00'}
res = self.client.get(self.url)
eq_(res.status_code, 200)
data = json.loads(res.content)
eq_(data['text'], 'blah')
@mock.patch('mkt.developers.views_payments.client.api')
def test_set(self, api):
api.bango.sbi.post.return_value = {
'expires': '2014-08-31T00:00:00',
'valid': '2014-08-31T00:00:00'}
res = self.client.post(self.url)
eq_(res.status_code, 200)
data = json.loads(res.content)
eq_(data['valid'], '2014-08-31T00:00:00')
class TestPaymentAccountsForm(PaymentsBase):
def setUp(self):
super(TestPaymentAccountsForm, self).setUp()
self.url = reverse('mkt.developers.bango.payment_accounts_form')
def test_login_required(self):
self.client.logout()
self.assertLoginRequired(self.client.get(self.url))
def test_mine(self):
res = self.client.get(self.url)
eq_(res.status_code, 200)
eq_(res.context['bango_account_list_form']
.fields['accounts'].choices.queryset.get(), self.account)
def test_mine_disagreed_tos(self):
self.account.update(agreed_tos=False)
res = self.client.get(self.url)
eq_(res.status_code, 200)
self.assertSetEqual(res.context['bango_account_list_form']
.fields['accounts'].choices.queryset.all(), [])
class TestPaymentDelete(PaymentsBase):
def setUp(self):
super(TestPaymentDelete, self).setUp()
self.url = reverse('mkt.developers.bango.delete_payment_account',
args=[self.account.pk])
def test_login_required(self):
self.client.logout()
self.assertLoginRequired(self.client.post(self.url, data={}))
def test_not_mine(self):
self.login(UserProfile.objects.get(pk=5497308))
eq_(self.client.post(self.url, data={}).status_code, 404)
def test_mine(self):
eq_(self.client.post(self.url, data={}).status_code, 200)
eq_(PaymentAccount.objects.get(pk=self.account.pk).inactive, True)
|
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
"""
Known packaging and package metadata formats.
https://en.wikipedia.org/wiki/Package_manager
https://en.wikipedia.org/wiki/Package_format
"""
package_formats = {
# mainline distros
'rpm': ('RPM (Linux)', ''),
# 'rpm_spec': ('RPM spec file (Linux)', ''),
'deb': ('Debian (Linux)', ''),
# 'deb_control': ('Debian control file (Linux)', ''),
# java
'pom': ('Maven POM (Java)', ''),
'ivy': ('IVY (Java)', ''),
'gradle': ('gradle (Groovy/Java)', 'https://gradle.org/'),
'jboss': ('JBoss (Java)', ''),
'buildr': ('buildr (Ruby)', 'https://buildr.apache.org/'),
'osgi': ('OSGi (Eclipse/Java)', ''),
'sbt': ('sbt (Scala/Java)', 'http://www.scala-sbt.org/'),
'clojars': ('Clojars (Clojure)', ''),
'eclipse': ('Eclipse plugin (Eclipse)', ''),
'netbeans': ('NetBeans plugin (NetBeans)', ''),
'jenkins': ('Jenkins plugin (Jenkins)', ''),
# linux
'lkm': ('Loadable Kernel Module (Linux)', ''),
# Perl
'cpan': ('CPAN (Perl)', ''),
# ruby
'gem': ('RubyGems (Ruby)', ''),
'gemfile': ('Bundler Gemfile/Gemfile.lock (Ruby)', ''),
'gemspec': ('RubyGem gemspec file (Ruby)', ''),
# JS
'npm': ('npm (JavaScript)', ''),
'jspm': ('jspm (JavaScript)', ''),
'bower': ('Bower (JavaScript)', ''),
# php
'pear': ('PEAR (PHP)', ''),
'composer': ('Composer (PHP)', ''),
# python
'setup.py': ('Python package (Python)', ''),
'sdist': ('PyPI (Python)', ''),
'bdist': ('PyPI (Python)', ''),
'pypi': ('PyPI (Python)', ''),
'py': ('Python metadata', ''), # __version__, __copyright__
'egg': ('Egg (Python)', ''),
'wheel': ('Wheel (Python)', ''),
# windows
'nuget': ('NuGet (.NET)', ''),
# exes
'winpe': ('PE Binary (Windows)', ''),
'elf': ('ELF binaries (POSIX)', ''),
'macho': ('Mach-O binaries (MacOSX)', ''),
# installers
'mpkg': ('Apple m/package (MacOSX)', ''),
'msi': ('Windows installer (Windows)', ''),
# mobile
'ipa': ('.ipa (iOS)', ''),
'apk': ('.apk (Android)', ''),
'modlic': ('MODULE_LICENSE (Android)', ''),
# Go
'godoc': ('GoDoc (Go)', ''),
'godep': ('Godep (Go)', 'https://github.com/tools/godep'),
# less common
'gom': ('Gom (Go)', ''),
'gondler': ('Gondler (Go)', ''),
'goop': ('Goop (Go)', ''),
'dondur': ('dondur (Go)', 'https://github.com/oguzbilgic/dondur'),
# less common
'buildout': ('buildout (Python)', ''),
'about': ('AboutCode', 'http://aboutcode.org'),
'freebsd': ('FreeBSD ports (Unix)', ''),
'openbsd': ('OpenBSD ports (Unix)', ''),
'podfile': ('CocoaPods Podfile (Objective-C/Swift)', 'https://cocoapods.org/'),
'vmdk': ('VMware disk image', ''),
'vdi': ('VirtualBox disk image', ''),
'spdx': ('SPDX', ''),
'doap': ('DOAP', ''),
'docker': ('Docker Image', ''),
'bosh': ('BOSH (CloudFoundry)', ''),
'ebuild': ('Gentoo ebuild (Linux)', ''),
'nix': ('NixOS (Linux)', ''),
'conary': ('conary rPath (Linux)', ''),
'opkg': ('Yocto opkg (Linux)', ''),
'pacman': ('ArchLinux pacman (Linux)', ''),
'pkgsrc': ('NetBSD pkgsrc (Unix)', ''),
'brew': ('Homebrew (MacOSX)', ''),
'slack': ('Slackware (Linux)', ''),
'solaris': ('Solaris (Unix)', ''),
'cran': ('CRAN (R)', ''),
'mix': ('Mix (Elixir/Erlang)', 'http://Hex.pm',),
'melpa': ('MELPA (Emacs)', ''),
'cabal': ('Cabal (Haskell)', ''),
'cargo': ('cargo (Rust)', ''),
'conda': ('Conda (Python)', ''),
'pypm': ('PyPM (Python)', ''),
'rocks': ('LuaRocks (Lua)', ''),
'appdata': ('AppStream (Linux)', 'https://github.com/ximion/appstream'),
'asdf': ('ASDF (Common Lisp)', ''),
'ctan': ('CTAN (TeX)', ''),
'appx': ('.appx (Windows 8)', ''),
'sublime': ('Sublime plugin (Sublime)', ''),
'rebar': ('Rebar (Erlang)', ''),
'cean': ('CEAN (Erlang)', ''),
'beam': ('Beam (Erlang)', ''),
}
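# Illustrative lookup: each entry maps a format key to a
# (display name, optional homepage URL) tuple.
#   name, url = package_formats['npm']     # ('npm (JavaScript)', '')
#   name, url = package_formats['gradle']  # ('gradle (Groovy/Java)', 'https://gradle.org/')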
|
import numpy as np
import pandas as pd
import openturns as ot
from .conf_file_generation import GENERATION_CONF, post_process_generated_dataset
def sample_from_conf(
var_conf: dict, corr_conf: dict, n_sample: int, seed: int = None
) -> pd.DataFrame:
"""
Generate a dataset with n_sample rows from the configuration dict var_conf.
Parameters
----------
var_conf: dict
Configuration of the variables (correlation with the target,
marginal distributions, bounds, rounding)
corr_conf: dict
Correlations between pairs of explanatory variables
n_sample: int
Number of rows in the output dataset
seed: int, optional
Optional seed for replicability
Returns
-------
df_sample: pd.DataFrame
Dataset generated from the configuration dicts
"""
## Retrieve target variable
var_list = list(var_conf.keys())
target_var = var_list[-1]
i_target_var = len(var_list) - 1
assert var_conf[target_var]["corr"] is None # Make sure that correlation
# parameter is set to None for the target variable.
## Extract var to i_var dict
var_dict = {}
for i_var, var in enumerate(var_list):
var_dict[var] = i_var
## Define marginal distributions of each variable
marginals = []
for var in var_list:
marginals.append(var_conf[var]["marg"])
## Define correlations with target variable
R = ot.CorrelationMatrix(len(var_list))
for i_var, var in enumerate(var_list):
if var != target_var:
R[i_var, i_target_var] = var_conf[var]["corr"]
## Define correlations within explanatory variables
for key, value in corr_conf.items():
i_min = min(var_dict[key[0]], var_dict[key[1]])
i_max = max(var_dict[key[0]], var_dict[key[1]])
R[i_min, i_max] = value
## Build distribution and sample
copula = ot.NormalCopula(R)
distribution = ot.ComposedDistribution(marginals, copula)
if seed is not None:
ot.RandomGenerator.SetSeed(seed)
df_sample = pd.DataFrame(
np.array(distribution.getSample(n_sample)), columns=var_list
)
## Apply bounds
for var in var_list:
if var_conf[var]["bounds"] is not None:
df_sample[var] = df_sample[var].clip(
var_conf[var]["bounds"][0], var_conf[var]["bounds"][1]
)
## Apply rounding
for var in var_list:
df_sample[var] = df_sample[var].round(var_conf[var]["round"])
## Apply post-processing
df_sample = post_process_generated_dataset(df_sample)
return df_sample
def prepare_ML_sets(
generation_conf: dict, n_sample: int, test_size: float = 0.25, seed: int = None
) -> tuple:
"""
Generate train, eval and test sets in X, y scikit-learn format.
Parameters
----------
generation_conf: dict
Configuration dict of the dataset (one entry per train/eval/test split)
n_sample: int
Total number of rows, split across the train, eval and test sets
test_size: float, optional
Proportion of the test set; the eval set uses the same proportion
seed: int, optional
Optional seed for replicability (incremented for each split)
Returns
-------
output: tuple
tuple of generated datasets with format:
(X_train, y_train, X_eval, y_eval, X_test, y_test)
"""
## Get target_var name
target_var = list(generation_conf["train"]["var"].keys())[-1]
steps = ["train", "eval", "test"]
n_sample_list = [
int(n_sample * (1 - 2 * test_size)),
int(n_sample * test_size),
int(n_sample * test_size),
]
output = []
for i_step, (step, i_sample) in enumerate(zip(steps, n_sample_list)):
if seed is None: # Change seed for each step
current_seed = None
else:
current_seed = seed + i_step
df_step = sample_from_conf(
generation_conf[step]["var"],
generation_conf[step]["corr"],
i_sample,
seed=current_seed,
)
output += [df_step.drop([target_var], axis=1), df_step[target_var]]
return tuple(output)
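# Minimal usage sketch (assumption: GENERATION_CONF, imported above, follows the
# layout read by prepare_ML_sets, i.e. {"train"/"eval"/"test": {"var": {...}, "corr": {...}}}):
#   X_train, y_train, X_eval, y_eval, X_test, y_test = prepare_ML_sets(
#       GENERATION_CONF, n_sample=10000, test_size=0.25, seed=42)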
|
import os
import sys
sys.path.append(os.path.normpath(os.path.join(os.path.abspath(__file__), '..', '..', '..', "common")))
from env_indigo import *
indigo = Indigo()
for m in indigo.iterateSDFile(joinPathPy('molecules/partial_arom.sdf', __file__)):
print("Smiles: " + m.smiles())
# count number of aromatic bonds
arom_bonds = len([1 for b in m.iterateBonds() if b.bondOrder() == 4])
print(" Aromatic bonds: %d" % arom_bonds)
m2 = indigo.loadMolecule(m.smiles())
print("Reloaded smiles: " + m2.smiles())
arom_bonds2 = len([1 for b in m2.iterateBonds() if b.bondOrder() == 4])
print(" Aromatic bonds: %d" % arom_bonds2)
if arom_bonds != arom_bonds2:
sys.stderr.write("Number of aromatic bonds (%d and %d) is different in %s and %s.\n" %
(arom_bonds, arom_bonds2, m.smiles(), m2.smiles()))
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import shutil
import sys
from spack import *
class Hdf5(AutotoolsPackage):
"""HDF5 is a data model, library, and file format for storing and managing
data. It supports an unlimited variety of datatypes, and is designed for
flexible and efficient I/O and for high volume and complex data.
"""
homepage = "https://support.hdfgroup.org/HDF5/"
url = "https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-1.10/hdf5-1.10.1/src/hdf5-1.10.1.tar.gz"
list_url = "https://support.hdfgroup.org/ftp/HDF5/releases"
list_depth = 3
version('1.10.2', '8d4eae84e533efa57496638fd0dca8c3')
version('1.10.1', '43a2f9466702fb1db31df98ae6677f15')
version('1.10.0-patch1', '9180ff0ef8dc2ef3f61bd37a7404f295')
version('1.10.0', 'bdc935337ee8282579cd6bc4270ad199')
version('1.8.19', '7f568e2464d4ab0a74d16b23956d900b')
version('1.8.18', 'dd2148b740713ca0295442ec683d7b1c')
version('1.8.17', '7d572f8f3b798a628b8245af0391a0ca')
version('1.8.16', 'b8ed9a36ae142317f88b0c7ef4b9c618')
version('1.8.15', '03cccb5b33dbe975fdcd8ae9dc021f24')
version('1.8.14', 'a482686e733514a51cde12d6fe5c5d95')
version('1.8.13', 'c03426e9e77d7766944654280b467289')
version('1.8.12', 'd804802feb99b87fc668a90e6fa34411')
version('1.8.10', '710aa9fb61a51d61a7e2c09bf0052157')
variant('debug', default=False,
description='Builds a debug version of the library')
variant('shared', default=True,
description='Builds a shared version of the library')
variant('hl', default=False, description='Enable the high-level library')
variant('cxx', default=False, description='Enable C++ support')
variant('fortran', default=False, description='Enable Fortran support')
variant('threadsafe', default=False,
description='Enable thread-safe capabilities')
variant('mpi', default=True, description='Enable MPI support')
variant('szip', default=False, description='Enable szip support')
variant('pic', default=True,
description='Produce position-independent code (for shared libs)')
depends_on('mpi', when='+mpi')
# numactl does not currently build on darwin
if sys.platform != 'darwin':
depends_on('numactl', when='+mpi+fortran')
depends_on('szip', when='+szip')
depends_on('zlib@1.1.2:')
# There are several officially unsupported combinations of the features:
# 1. Thread safety is not guaranteed via high-level C-API but in some cases
# it works.
# conflicts('+threadsafe+hl')
# 2. Thread safety is not guaranteed via Fortran (CXX) API, but it's
# possible for a dependency tree to contain a package that uses Fortran
# (CXX) API in a single thread and another one that uses low-level C-API
# in multiple threads. To allow for such scenarios, we don't specify the
# following conflicts.
# conflicts('+threadsafe+cxx')
# conflicts('+threadsafe+fortran')
# 3. Parallel features are not supported via CXX API, but for the reasons
# described in #2 we allow for such combination.
# conflicts('+mpi+cxx')
# There are known build failures with intel@18.0.1. This issue is
# discussed and patch is provided at
# https://software.intel.com/en-us/forums/intel-fortran-compiler-for-linux-and-mac-os-x/topic/747951.
patch('h5f90global-mult-obj-same-equivalence-same-common-block.patch',
when='@1.10.1%intel@18')
# Turn line comments into block comments to conform with pre-C99 language
# standards. Versions of hdf5 after 1.8.10 don't require this patch,
# either because they conform to pre-C99 or neglect to ask for pre-C99
# language standards from their compiler. The hdf5 build system adds
# the -ansi cflag (run 'man gcc' for info on -ansi) for some versions
# of some compilers (see hdf5-1.8.10/config/gnu-flags). The hdf5 build
# system does not provide an option to disable -ansi, but since the
# pre-C99 code is restricted to just five lines of line comments in
# three src files, this patch accomplishes the simple task of patching the
# three src files and leaves the hdf5 build system alone.
patch('pre-c99-comments.patch', when='@1.8.10')
filter_compiler_wrappers('h5cc', 'h5c++', 'h5fc', relative_root='bin')
def url_for_version(self, version):
url = "https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-{0}/hdf5-{1}/src/hdf5-{1}.tar.gz"
return url.format(version.up_to(2), version)
@property
def libs(self):
"""HDF5 can be queried for the following parameters:
- "hl": high-level interface
- "cxx": C++ APIs
- "fortran": Fortran APIs
:return: list of matching libraries
"""
query_parameters = self.spec.last_query.extra_parameters
shared = '+shared' in self.spec
# This map contains a translation from query_parameters
# to the libraries needed
query2libraries = {
tuple(): ['libhdf5'],
('cxx', 'fortran', 'hl'): [
'libhdf5hl_fortran',
'libhdf5_hl_cpp',
'libhdf5_hl',
'libhdf5_fortran',
'libhdf5',
],
('cxx', 'hl'): [
'libhdf5_hl_cpp',
'libhdf5_hl',
'libhdf5',
],
('fortran', 'hl'): [
'libhdf5hl_fortran',
'libhdf5_hl',
'libhdf5_fortran',
'libhdf5',
],
('hl',): [
'libhdf5_hl',
'libhdf5',
],
('cxx', 'fortran'): [
'libhdf5_fortran',
'libhdf5_cpp',
'libhdf5',
],
('cxx',): [
'libhdf5_cpp',
'libhdf5',
],
('fortran',): [
'libhdf5_fortran',
'libhdf5',
]
}
# Turn the query into the appropriate key
key = tuple(sorted(query_parameters))
libraries = query2libraries[key]
return find_libraries(
libraries, root=self.prefix, shared=shared, recursive=True
)
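# Illustrative consumer-side query (a sketch; assumes a dependent spec built with
# the matching variants enabled). Spack's query-parameter syntax selects the
# corresponding entry of query2libraries above:
#   self.spec['hdf5:hl,fortran'].libs
#   # -> libhdf5hl_fortran, libhdf5_hl, libhdf5_fortran, libhdf5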
@run_before('configure')
def fortran_check(self):
if '+fortran' in self.spec and not self.compiler.fc:
msg = 'cannot build a Fortran variant without a Fortran compiler'
raise RuntimeError(msg)
def configure_args(self):
# Always enable this option. This does not actually enable any
# features: it only *allows* the user to specify certain
# combinations of other arguments. Enabling it just skips a
# sanity check in configure, so this doesn't merit a variant.
extra_args = ['--enable-unsupported']
extra_args += self.enable_or_disable('threadsafe')
extra_args += self.enable_or_disable('cxx')
extra_args += self.enable_or_disable('hl')
extra_args += self.enable_or_disable('fortran')
if '+szip' in self.spec:
extra_args.append('--with-szlib=%s' % self.spec['szip'].prefix)
else:
extra_args.append('--without-szlib')
if self.spec.satisfies('@1.10:'):
if '+debug' in self.spec:
extra_args.append('--enable-build-mode=debug')
else:
extra_args.append('--enable-build-mode=production')
else:
if '+debug' in self.spec:
extra_args.append('--enable-debug=all')
else:
extra_args.append('--enable-production')
# '--enable-fortran2003' no longer exists as of version 1.10.0
if '+fortran' in self.spec:
extra_args.append('--enable-fortran2003')
else:
extra_args.append('--disable-fortran2003')
if '+shared' in self.spec:
extra_args.append('--enable-shared')
else:
extra_args.append('--disable-shared')
extra_args.append('--enable-static-exec')
if '+pic' in self.spec:
extra_args += ['%s=%s' % (f, self.compiler.pic_flag)
for f in ['CFLAGS', 'CXXFLAGS', 'FCFLAGS']]
if '+mpi' in self.spec:
# The HDF5 configure script warns if cxx and mpi are enabled
# together. There doesn't seem to be a real reason for this, except
# that parts of the MPI interface are not accessible via the C++
# interface. Since they are still accessible via the C interface,
# this is not actually a problem.
extra_args += ['--enable-parallel',
'CC=%s' % self.spec['mpi'].mpicc]
if '+cxx' in self.spec:
extra_args.append('CXX=%s' % self.spec['mpi'].mpicxx)
if '+fortran' in self.spec:
extra_args.append('FC=%s' % self.spec['mpi'].mpifc)
extra_args.append('--with-zlib=%s' % self.spec['zlib'].prefix)
return extra_args
@run_after('configure')
def patch_postdeps(self):
if '@:1.8.14' in self.spec:
# On Ubuntu14, HDF5 1.8.12 (and maybe other versions)
# mysteriously end up with "-l -l" in the postdeps in the
# libtool script. Patch this by removing the spurious -l's.
filter_file(
r'postdeps="([^"]*)"',
lambda m: 'postdeps="%s"' % ' '.join(
arg for arg in m.group(1).split(' ') if arg != '-l'),
'libtool')
@run_after('install')
@on_package_attributes(run_tests=True)
def check_install(self):
# Build and run a small program to test the installed HDF5 library
spec = self.spec
print("Checking HDF5 installation...")
checkdir = "spack-check"
with working_dir(checkdir, create=True):
source = r"""
#include <hdf5.h>
#include <assert.h>
#include <stdio.h>
int main(int argc, char **argv) {
unsigned majnum, minnum, relnum;
herr_t herr = H5get_libversion(&majnum, &minnum, &relnum);
assert(!herr);
printf("HDF5 version %d.%d.%d %u.%u.%u\n", H5_VERS_MAJOR, H5_VERS_MINOR,
H5_VERS_RELEASE, majnum, minnum, relnum);
return 0;
}
"""
expected = """\
HDF5 version {version} {version}
""".format(version=str(spec.version.up_to(3)))
with open("check.c", 'w') as f:
f.write(source)
if '+mpi' in spec:
cc = Executable(spec['mpi'].mpicc)
else:
cc = Executable(self.compiler.cc)
cc(*(['-c', "check.c"] + spec['hdf5'].headers.cpp_flags.split()))
cc(*(['-o', "check", "check.o"] +
spec['hdf5'].libs.ld_flags.split()))
try:
check = Executable('./check')
output = check(output=str)
except ProcessError:
output = ""
success = output == expected
if not success:
print("Produced output does not match expected output.")
print("Expected output:")
print('-' * 80)
print(expected)
print('-' * 80)
print("Produced output:")
print('-' * 80)
print(output)
print('-' * 80)
raise RuntimeError("HDF5 install check failed")
shutil.rmtree(checkdir)
|
import getopt, os, time, re, gzip, json, traceback
import sys, uuid
from config import DBConfig, Config
from part import PartitionedList
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import sessionmaker
from sqlalchemy.dialects.postgresql import UUID, JSONB
from sqlalchemy.dialects.oracle import RAW, CLOB
from sqlalchemy.dialects.mysql import BINARY
from sqlalchemy.types import TypeDecorator, CHAR, String
from stats import Stats
Version = "1.1"
t0 = time.time()
#from sqlalchemy import schema
Usage = """
python db_dump.py [options] -c <config.yaml> <rse_name>
-c <config file> -- required
-d <db config file> -- required - uses rucio.cfg format. Must contain "default" and "schema" under [database]
-v -- verbose
-n <nparts>
-f <state>:<prefix> -- filter files with given state to the files set with prefix
state can be either combination of capital letters or "*"
can be repeated ( -f A:/path1 -f CD:/path2 )
use "*" for state to send all the files to the output set ( -f *:/path )
-l -- include more columns, otherwise physical path only, automatically on if -a is used
-z -- produce gzipped output
-s <stats file> -- write stats into JSON file
-S <key> -- add dump stats to stats under the key
-m <N files> -- stop after N files
"""
class GUID(TypeDecorator):
"""
Platform-independent GUID type.
Uses PostgreSQL's UUID type,
uses Oracle's RAW type,
uses MySQL's BINARY type,
otherwise uses CHAR(32), storing as stringified hex values.
"""
impl = CHAR
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(UUID())
elif dialect.name == 'oracle':
return dialect.type_descriptor(RAW(16))
elif dialect.name == 'mysql':
return dialect.type_descriptor(BINARY(16))
else:
return dialect.type_descriptor(CHAR(32))
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == 'postgresql':
return str(value).lower()
elif dialect.name == 'oracle':
return uuid.UUID(value).bytes
elif dialect.name == 'mysql':
return uuid.UUID(value).bytes
else:
            if not isinstance(value, uuid.UUID):
                return "%.32x" % uuid.UUID(value).int
            else:
                # hexstring
                return "%.32x" % value.int
def process_result_value(self, value, dialect):
if value is None:
return value
elif dialect.name == 'oracle':
return str(uuid.UUID(bytes=value)).replace('-', '').lower()
elif dialect.name == 'mysql':
return str(uuid.UUID(bytes=value)).replace('-', '').lower()
else:
return str(uuid.UUID(value)).replace('-', '').lower()
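# Illustrative round trip for the GUID type above (editorial sketch, not
# executed by this script): a value bound to a GUID column is rendered in the
# form the active dialect expects, and a value read back is always normalised
# to a 32-character lowercase hex string.
#
#   u = uuid.uuid4()
#   # bind side:   PostgreSQL    -> str(u).lower()
#   #              Oracle/MySQL  -> u.bytes
#   #              other dialects -> "%.32x" % u.int
#   # result side: str(uuid.UUID(...)).replace('-', '').lower()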
opts, args = getopt.getopt(sys.argv[1:], "f:c:ln:vd:s:S:zm:")
filters = {}
all_states = set()
for opt, val in opts:
if opt == '-f':
states, prefix = val.split(':')
filters[states] = prefix
all_states |= set(states)
opts = dict(opts)
if not args or (not "-c" in opts and not "-d" in opts):
print (Usage)
sys.exit(2)
verbose = "-v" in opts
long_output = "-l" in opts
out_prefix = opts.get("-o")
zout = "-z" in opts
stats_file = opts.get("-s")
stats_key = opts.get("-S", "db_dump")
stop_after = int(opts.get("-m", 0)) or None
rse_name = args[0]
if "-d" in opts:
dbconfig = DBConfig.from_cfg(opts["-d"])
else:
dbconfig = DBConfig.from_yaml(opts["-c"])
#print("dbconfig: url:", dbconfig.DBURL, "schema:", dbconfig.Schema)
config = Config(opts["-c"])
stats = None if stats_file is None else Stats(stats_file)
if stats:
stats[stats_key] = {
"status":"started",
"version":Version,
"rse":rse_name,
"start_time":t0,
"end_time":None,
"files":None,
"elapsed":None,
"directories":None,
"exception":[]
}
try:
Base = declarative_base()
if dbconfig.Schema:
Base.metadata.schema = dbconfig.Schema
class Replica(Base):
__tablename__ = "replicas"
path = Column(String)
state = Column(String)
rse_id = Column(GUID(), primary_key=True)
scope = Column(String, primary_key=True)
name = Column(String, primary_key=True)
class RSE(Base):
__tablename__ = "rses"
id = Column(GUID(), primary_key=True)
rse = Column(String)
if "-n" in opts:
nparts = int(opts["-n"])
else:
nparts = config.nparts(rse_name) or 1
subdir = config.dbdump_root(rse_name) or "/"
if not subdir.endswith("/"): subdir = subdir + "/"
print(f"Filtering files under {subdir} only")
_, ignore_file_patterns = config.ignore_patterns(rse_name)
engine = create_engine(dbconfig.DBURL, echo=verbose)
Session = sessionmaker(bind=engine)
session = Session()
rse = session.query(RSE).filter(RSE.rse == rse_name).first()
if rse is None:
print ("RSE %s not found" % (rse_name,))
sys.exit(1)
rse_id = rse.id
#print ("rse_id:", type(rse_id), rse_id)
batch = 100000
outputs = {
states:PartitionedList.create(nparts, prefix, zout) for states, prefix in filters.items()
}
all_replicas = '*' in all_states
replicas = session.query(Replica).filter(Replica.rse_id==rse_id).yield_per(batch)
if all_replicas:
sys.stderr.write("including all replias\n")
else:
print("including replicas in states:", list(all_states), file=sys.stderr)
replicas = replicas.filter(Replica.state.in_(list(all_states)))
dirs = set()
n = 0
filter_re = config.dbdump_param(rse, "filter")
if filter_re:
filter_re = re.compile(filter_re)
for r in replicas:
path = r.name
state = r.state
if not path.startswith(subdir):
continue
if filter_re is not None:
if not filter_re.search(path):
continue
if any(p.match(path) for p in ignore_file_patterns):
continue
words = path.rsplit("/", 1)
if len(words) == 1:
dirp = "/"
else:
dirp = words[0]
dirs.add(dirp)
for s, out_list in outputs.items():
if state in s or s == '*':
if long_output:
out_list.add("%s\t%s\t%s\t%s\t%s" % (rse_name, r.scope, r.name, path or "null", r.state))
else:
out_list.add(path or "null")
n += 1
if n % batch == 0:
print(n)
if stop_after is not None and n >= stop_after:
print(f"stopped after {stop_after} files", file=sys.stderr)
break
for out_list in outputs.values():
out_list.close()
sys.stderr.write("Found %d files in %d directories\n" % (n, len(dirs)))
t1 = time.time()
t = int(t1 - t0)
s = t % 60
m = t // 60
sys.stderr.write("Elapsed time: %dm%02ds\n" % (m, s))
except:
lines = traceback.format_exc().split("\n")
t1 = time.time()
if stats is not None:
stats[stats_key].update({
"status":"failed",
"end_time":t1,
"exception":lines
})
stats.save()
else:
if stats is not None:
stats[stats_key].update({
"status":"done",
"end_time":t1,
"files":n,
"elapsed":t1-t0,
"directories":len(dirs)
})
stats.save()
|
# Copyright (c) 2020
# Author: xiaoweixiang
import distutils.command.bdist_wininst as orig
class bdist_wininst(orig.bdist_wininst):
def reinitialize_command(self, command, reinit_subcommands=0):
"""
Supplement reinitialize_command to work around
http://bugs.python.org/issue20819
"""
cmd = self.distribution.reinitialize_command(
command, reinit_subcommands)
if command in ('install', 'install_lib'):
cmd.install_lib = None
return cmd
def run(self):
self._is_running = True
try:
orig.bdist_wininst.run(self)
finally:
self._is_running = False
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Luascoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid blocks.
In this test we connect to one node over p2p, and test block requests:
1) Valid blocks should be requested and become chain tip.
2) Invalid block with duplicated transaction should be re-requested.
3) Invalid block with bad coinbase value should be rejected and not
re-requested.
"""
import copy
from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script
from test_framework.messages import COIN
from test_framework.mininode import P2PDataStore
from test_framework.test_framework import LuascoinTestFramework
from test_framework.util import assert_equal
class InvalidBlockRequestTest(LuascoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [["-whitelist=127.0.0.1"]]
def run_test(self):
# Add p2p connection to node0
node = self.nodes[0] # convenience reference to the node
node.add_p2p_connection(P2PDataStore())
best_block = node.getblock(node.getbestblockhash())
tip = int(node.getbestblockhash(), 16)
height = best_block["height"] + 1
block_time = best_block["time"] + 1
self.log.info("Create a new block with an anyone-can-spend coinbase")
height = 1
block = create_block(tip, create_coinbase(height), block_time)
block.solve()
# Save the coinbase for later
block1 = block
tip = block.sha256
node.p2p.send_blocks_and_test([block1], node, success=True)
self.log.info("Mature the block.")
node.generatetoaddress(100, node.get_deterministic_priv_key().address)
best_block = node.getblock(node.getbestblockhash())
tip = int(node.getbestblockhash(), 16)
height = best_block["height"] + 1
block_time = best_block["time"] + 1
# Use merkle-root malleability to generate an invalid block with
# same blockheader (CVE-2012-2459).
# Manufacture a block with 3 transactions (coinbase, spend of prior
# coinbase, spend of that spend). Duplicate the 3rd transaction to
# leave merkle root and blockheader unchanged but invalidate the block.
# For more information on merkle-root malleability see src/consensus/merkle.cpp.
self.log.info("Test merkle root malleability.")
block2 = create_block(tip, create_coinbase(height), block_time)
block_time += 1
        # b'\x51' is OP_TRUE
tx1 = create_tx_with_script(block1.vtx[0], 0, script_sig=b'\x51', amount=50 * COIN)
tx2 = create_tx_with_script(tx1, 0, script_sig=b'\x51', amount=50 * COIN)
block2.vtx.extend([tx1, tx2])
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.rehash()
block2.solve()
orig_hash = block2.sha256
block2_orig = copy.deepcopy(block2)
# Mutate block 2
block2.vtx.append(tx2)
assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root())
assert_equal(orig_hash, block2.rehash())
assert block2_orig.vtx != block2.vtx
node.p2p.send_blocks_and_test([block2], node, success=False, reject_reason='bad-txns-duplicate')
# Check transactions for duplicate inputs (CVE-2018-17144)
self.log.info("Test duplicate input block.")
block2_dup = copy.deepcopy(block2_orig)
block2_dup.vtx[2].vin.append(block2_dup.vtx[2].vin[0])
block2_dup.vtx[2].rehash()
block2_dup.hashMerkleRoot = block2_dup.calc_merkle_root()
block2_dup.rehash()
block2_dup.solve()
node.p2p.send_blocks_and_test([block2_dup], node, success=False, reject_reason='bad-txns-inputs-duplicate')
self.log.info("Test very broken block.")
block3 = create_block(tip, create_coinbase(height), block_time)
block_time += 1
block3.vtx[0].vout[0].nValue = 100 * COIN # Too high!
block3.vtx[0].sha256 = None
block3.vtx[0].calc_sha256()
block3.hashMerkleRoot = block3.calc_merkle_root()
block3.rehash()
block3.solve()
node.p2p.send_blocks_and_test([block3], node, success=False, reject_reason='bad-cb-amount')
# Complete testing of CVE-2012-2459 by sending the original block.
# It should be accepted even though it has the same hash as the mutated one.
self.log.info("Test accepting original block after rejecting its mutated version.")
node.p2p.send_blocks_and_test([block2_orig], node, success=True, timeout=5)
# Update tip info
height += 1
block_time += 1
tip = int(block2_orig.hash, 16)
# Complete testing of CVE-2018-17144, by checking for the inflation bug.
# Create a block that spends the output of a tx in a previous block.
block4 = create_block(tip, create_coinbase(height), block_time)
tx3 = create_tx_with_script(tx2, 0, script_sig=b'\x51', amount=50 * COIN)
# Duplicates input
tx3.vin.append(tx3.vin[0])
tx3.rehash()
block4.vtx.append(tx3)
block4.hashMerkleRoot = block4.calc_merkle_root()
block4.rehash()
block4.solve()
self.log.info("Test inflation by duplicating input")
node.p2p.send_blocks_and_test([block4], node, success=False, reject_reason='bad-txns-inputs-duplicate')
if __name__ == '__main__':
InvalidBlockRequestTest().main()
|
"""Pipeline class implementing Pipes and Filters pattern.
A generic pipeline to process messages efficiently in a pipes-and-filter manner (multiprocessing possible).
Inspired, but not copied from
https://deparkes.co.uk/2019/12/08/simple-python-pipes-and-filters/
Authors:
- Lukas Block
- Adrian Raiser
Todo:
- Add license boilerplate
"""
import multiprocessing
from functools import partial
import traceback
from collections.abc import Iterable
from typing import Callable
from numpy import sin
class Pipeline(object):
"""Class representing Pipeline.
Class which represents a pipeline within the pipes and filters pattern.
Every pipeline consists of filters added in series to each other.
"""
def __init__(self, with_multiprocessing=False, max_no_processes=8):
"""Constructor.
Args:
with_multiprocessing (bool, optional): Enable multiprocessing. Defaults to False.
max_no_processes (int, optional): If enabled, create the passed amount of subprocesses. Defaults to 8.
"""
self._multiprocessing = with_multiprocessing
if with_multiprocessing:
self._pool = multiprocessing.Pool(max_no_processes)
self._filters = []
def add(self, filter : Callable, batch_processing=False):
"""Add filter to pipeline.
Args:
filter (Callable): A Callable object, taking a message object or an Iterable of message objects as first input. Message objects are any python serializable objects which are passed between filters in the pipeline.
batch_processing (bool, optional): Enable batch processing. The filter must support batch processing by taking an Iterable of message objects as argument. Defaults to False.
"""
assert callable(filter)
self._filters.append((filter, batch_processing))
def insert(self, index, filter, batch_processing=False):
"""Insert filter at provided index to the pipeline.
Args:
index (Int): Index to insert filter on
filter (Callable): Filter to be added
batch_processing (bool, optional): Enable batch processing. The filter must support it. Defaults to False.
"""
assert callable(filter)
self._filters.insert(index, (filter, batch_processing))
def execute(self, message, clbck=None, batch_processing=False):
"""Execute pipeline on passed message or list of messages.
        When multiprocessing is enabled, this function hands the message over
        to a worker process and returns as soon as the task has been submitted
        (i.e. the pipeline is not finished yet!); the callback is called when
        the task finishes. If all worker processes are busy, the task is queued
        and starts as soon as one of the currently running tasks finishes.
        Without multiprocessing, the filters run synchronously in the calling
        process.
Args:
message (object|List[object]): Message object or list to be piped
            clbck (Callable|None, optional): The callback to be called when the
                processing of the message has finished. Be careful: the callback
                will be called from another process because pipelines run in
                parallel. Thus, it might be necessary to use a
                multiprocessing.Queue in the clbck to get the result back into
                the main process. Furthermore, the callback should not block for
                too long, because this stops further pipelines from being
                started. Defaults to None.
batch_processing (bool, optional): Enable batch processing. Defaults to False.
Returns:
None
"""
# Check the clbck type
if clbck is not None:
assert callable(clbck)
# If the batch processing is true, the message must be iterable
if batch_processing:
assert isinstance(message, Iterable)
if self._multiprocessing:
# We are doing multiprocessing
# First prepare the call function
fnc = partial(Pipeline.call_fnc, filters=self._filters, message=message, batch_processing=batch_processing)
# Hand it over
if clbck is None:
return self._pool.apply_async(Pipeline.call_fnc, (message, self._filters, batch_processing), error_callback=Pipeline.error_callback)
else:
                return self._pool.apply_async(Pipeline.call_fnc, (message, self._filters, batch_processing), callback=clbck, error_callback=Pipeline.error_callback)
else:
# We are not doing multiprocessing, call the function directly
            try:
                result = self(message, batch_processing=batch_processing)
            except Exception as ex:
                Pipeline.error_callback(ex)
                return None
            if clbck is not None:
                clbck(result)
def __call__(self, message, batch_processing=False):
"""Overloads the call operator. See execute for more information.
Args:
message (object|List[object]): See execute
batch_processing (bool, optional): See execute. Defaults to False.
Returns:
ImageMessage|List[ImageMessage]: See execute.
"""
return Pipeline.call_fnc(message, self._filters, batch_processing=batch_processing)
def join(self):
"""Joins all started subprocesses for the pipeline.
Returns:
            None: Returns as soon as all subprocesses of the pipeline have finished.
"""
if self._multiprocessing:
self._pool.close()
self._pool.join()
def error_callback(e):
"""Prints error and exceptions which might occure within the pipeline.
Args:
e (object): The error or exception which occured in the pipeline
Returns:
None
"""
print("An exception occurred in the pipeline:")
traceback.print_exception(type(e), e, e.__traceback__)
def call_fnc(message, filters, batch_processing=False):
"""Handles the calling of provided filters.
        A filter is a function which takes a message, processes it and returns
        zero, one or more messages derived from it. As such, the Pipeline
        itself is a filter, too.
Args:
message (object): See execute
filters (List[Callable]): List of filters, which will be executed in order
batch_processing (bool, optional): See execute. Defaults to False.
Returns:
object|List[object]: See execute
"""
# Setup the start message(s)
prev_results = None
if batch_processing:
prev_results = message
else:
prev_results = [message]
# Now start processing
for fb in filters:
# The callable is stored as the first value in the tuple
f = fb[0]
new_results = []
# Run each filter for each message from the previous filter
if not fb[1]:
# The filter is not capable of batch processing all previous results
# at once
for pr in prev_results:
single_new_result = f(pr)
# Collect the single or multiple results of this filter
if isinstance(single_new_result, list):
new_results.extend(single_new_result)
elif single_new_result is None:
# Do nothing, because we have an empty result
pass
else:
new_results.append(single_new_result)
else:
# The filter can do batch processing
new_results = f(prev_results)
# After processing all messages from the previous filter, the collected
# results are now the results from the previous filter
prev_results = new_results
# Done with all filters, return
return prev_results
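# ---------------------------------------------------------------------------
# Minimal usage sketch (editorial addition, not part of the original module):
# builds a two-filter pipeline without multiprocessing and pushes one message
# through it. The filter names below are made up for the example.
if __name__ == "__main__":
    def double(message):
        # First filter: one message in, one message out.
        return message * 2

    def fan_out(message):
        # Second filter: one message in, several messages out.
        return [message, message + 1]

    demo = Pipeline(with_multiprocessing=False)
    demo.add(double)
    demo.add(fan_out)
    # Without multiprocessing, execute() runs the filters synchronously and
    # hands the result to the callback.
    demo.execute(3, clbck=print)  # prints [6, 7]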
|
from setuptools import setup, find_packages
setup(name="semparse",
description="semparse",
author="Sum-Ting Wong",
author_email="sumting@wo.ng",
install_requires=[],
packages=["semparse"],
)
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PerlMoose(PerlPackage):
"""A postmodern object system for Perl 5"""
homepage = "https://metacpan.org/pod/Moose"
url = "https://search.cpan.org/CPAN/authors/id/E/ET/ETHER/Moose-2.2006.tar.gz"
version('2.2010', sha256='af0905b69f18c27de1177c9bc7778ee495d4ec91be1f223e8ca8333af4de08c5')
version('2.2009', sha256='63ba8a5e27dbcbdbac2cd8f4162fff50a31e9829d8955a196a5898240c02d194')
version('2.2007', sha256='bc75a320b55ba26ac9e60e11a77b3471066cb615bf7097537ed22e20df88afe8')
version('2.2006', sha256='a4e00ab25cc41bebc5e7a11d71375fb5e64b56d5f91159afee225d698e06392b')
depends_on('perl-cpan-meta-check', type=('build', 'run'))
depends_on('perl-test-cleannamespaces', type=('build', 'run'))
depends_on('perl-devel-overloadinfo', type=('build', 'run'))
depends_on('perl-class-load-xs', type=('build', 'run'))
depends_on('perl-devel-stacktrace', type=('build', 'run'))
depends_on('perl-eval-closure', type=('build', 'run'))
depends_on('perl-sub-name', type=('build', 'run'))
depends_on('perl-module-runtime-conflicts', type=('build', 'run'))
depends_on('perl-devel-globaldestruction', type=('build', 'run'))
depends_on('perl-package-deprecationmanager', type=('build', 'run'))
depends_on('perl-package-stash-xs', type=('build', 'run'))
|
import json
from pyscf import gto,scf,mcscf, fci, lo, ci, cc
from pyscf.scf import ROHF, UHF,ROKS
import numpy as np
import pandas as pd
# THIS IS WHERE IT STARTS ====================================
df=json.load(open("../../../trail.json"))
spins={'Sc':1, 'Ti':2, 'V':3, 'Cr':6, 'Mn':5, 'Fe':4, 'Cu':1}
nd={'Sc':(1,0), 'Ti':(2,0), 'V':(3,0), 'Cr':(5,0), 'Mn':(5,0), 'Fe':(5,1), 'Cu':(5,5)}
cas={'Sc':3, 'Ti':4, 'V':5, 'Cr':6, 'Mn':7, 'Fe':8, 'Cu':11}
datacsv={}
for nm in ['atom','charge','method','basis','pseudopotential',
'totalenergy','totalenergy-stocherr','totalenergy-syserr']:
datacsv[nm]=[]
basis='vdz'
el='V'
charge=0
mol=gto.Mole()
mol.ecp={}
mol.basis={}
mol.ecp[el]=gto.basis.parse_ecp(df[el]['ecp'])
mol.basis[el]=gto.basis.parse(df[el][basis])
mol.charge=charge
if el == 'Cr' or el == 'Cu':
mol.spin=spins[el]-charge
else:
mol.spin=spins[el]+charge
mol.build(atom="%s 0. 0. 0."%el,verbose=4)
m=ROHF(mol)
m.level_shift=1000.0
dm=m.from_chk("../../../../HF/atoms/"+el+basis+str(charge)+".chk")
hf=m.kernel(dm)
m.analyze()
from pyscf.shciscf import shci
mc = shci.SHCISCF(m, 6, cas[el]-charge)
#mc.fcisolver.conv_tol = 1e-14
mc.fcisolver.mpiprefix="srun -n20"
mc.fcisolver.num_thrds=12
mc.verbose = 4
cas=mc.kernel()[0]
from pyscf.icmpspt import icmpspt
pt=icmpspt.icmpspt(mc,rdmM=500, PTM=1000,\
pttype="MRLCC",\
third_order=True,\
fully_ic=True,\
do_dm4=True)
datacsv['atom'].append(el)
datacsv['charge'].append(charge)
datacsv['method'].append('MRPT')
datacsv['basis'].append(basis)
datacsv['pseudopotential'].append('trail')
datacsv['totalenergy'].append(cas+pt)
datacsv['totalenergy-stocherr'].append(0.0)
datacsv['totalenergy-syserr'].append(0.0)
pd.DataFrame(datacsv).to_csv(el+".csv",index=False)
|
class ALU():
def __init__(self):
self.Rs = None
self.Rt = None
self.Rd = None
def alu(self, opcode):
if (opcode == 0):
self.Rd = self.Rs + self.Rt
return self.Rd
elif (opcode == 1):
self.Rd = self.Rs - self.Rt
return self.Rd
elif (opcode == 2):
self.Rd = int(0) + self.Rt
return self.Rd
        elif (opcode == 3):  # I-type == 1
            print('not sure what "BEQ" does')
        elif (opcode == 4):  # J-type == 2
            print('not sure what "J" does')
        elif (opcode == 5):
            print('not sure what "J" does')
        elif (opcode == 6):
            print('not sure what "J" does')
def setRs(self, Rs_final):
self.Rs = Rs_final
def setRt(self, Rt_final):
self.Rt = Rt_final
def setRd(self, Rd_final):
self.Rd = Rd_final
def getRs(self):
return self.Rs
def getRt(self):
return self.Rt
def getRd(self):
return self.Rd
|
from .intraday import *
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection,PolyCollection
def showNetwork(network,savefig=None):
node_x_coords=[]
node_y_coords=[]
link_coords=[]
poi_coords=[]
for _,node in network.node_dict.items():
node_x_coords.append(node.x_coord)
node_y_coords.append(node.y_coord)
for _,link in network.link_dict.items():
coords = list(link.geometry.coords)
link_coords.append(np.array(coords))
if len(network.POI_list):
for poi in network.POI_list:
coords = list(poi.geometry.exterior.coords)
poi_coords.append(np.array(coords))
fig, ax = plt.subplots(figsize=(12, 8))
# plot network nodes
ax.scatter(node_x_coords, node_y_coords, marker='o', c='red', s=10, zorder=1)
# plot network links
ax.add_collection(LineCollection(link_coords, colors='orange', linewidths=1, zorder=2))
# plot network pois
if len(poi_coords):
coll = PolyCollection(poi_coords, alpha=0.7, zorder=0)
ax.add_collection(coll)
# set axis
ax.autoscale_view()
plt.xlabel('x_coord')
plt.ylabel('y_coord')
plt.tight_layout()
# show fig
plt.show()
# save fig
if savefig:
try:
            figname = savefig['filename'] if 'filename' in savefig else 'network.png'
dpi = savefig['dpi'] if 'dpi' in savefig else 300
fig.savefig(figname, dpi=dpi, bbox_inches='tight')
except Exception as e:
print(e)
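# Illustrative call (editorial addition): `net` stands for any network object
# that exposes node_dict, link_dict and POI_list as used above.
#
#   showNetwork(net, savefig={'filename': 'network.png', 'dpi': 200})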
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class graceful_restart(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/global/graceful-restart. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration and operational state parameters for OSPFv2
graceful restart
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "graceful-restart"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"global",
"graceful-restart",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/config (container)
YANG Description: Configuration parameters relating to OSPFv2 graceful
restart
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to OSPFv2 graceful
restart
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/state (container)
YANG Description: Operational state parameters relating to OSPFv2 graceful
restart
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state parameters relating to OSPFv2 graceful
restart
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
from . import config
from . import state
class graceful_restart(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/global/graceful-restart. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration and operational state parameters for OSPFv2
graceful restart
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "graceful-restart"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"global",
"graceful-restart",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/config (container)
YANG Description: Configuration parameters relating to OSPFv2 graceful
restart
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to OSPFv2 graceful
restart
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/state (container)
YANG Description: Operational state parameters relating to OSPFv2 graceful
restart
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/graceful_restart/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Operational state parameters relating to OSPFv2 graceful
restart
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
|
import random
from pytest_bdd import given, when, then  # markers
from model.group import Group
# STEPS FOR ADD GROUP
# precondition
@given('a group list', target_fixture="group_list")  # these are fixtures, and they can be passed as parameters, as we did in the verify_group_added function
def group_list(db):
return db.get_group_list()
# precondition
@given('a group with <name>, <header> and <footer>', target_fixture="new_group")
def new_group(name, header, footer):
return Group(group_name=name, group_header=header, group_footer=footer)
# action
@when('I add the group to the list')  # this is also a fixture
def add_new_group(app, new_group):
app.group.create(new_group)
# postcondition
@then('the new group list is equal to the old list with the added group')  # and this is also a fixture
def verify_group_added(db, group_list, new_group):
old_groups_list = group_list
new_groups_list = db.get_group_list()
old_groups_list += [new_group]
assert sorted(old_groups_list, key=Group.id_or_max) == sorted(new_groups_list, key=Group.id_or_max)
# STEPS FOR DELETE GROUP
@given('non empty group list', target_fixture="non_empty_group_list")  # these are fixtures, and they can be passed as parameters, as we did in the verify_group_added function
def non_empty_group_list(app, db):
if len(db.get_group_list()) == 0:
app.group.create(Group(group_name="name", group_header="header", group_footer="footer"))
return db.get_group_list()
@given('a random group from non empty group list', target_fixture="random_group")
def random_group(non_empty_group_list):
return random.choice(non_empty_group_list)
@when('I delete the group from the list')  # this is also a fixture
def del_some_group(app, random_group):
app.group.delete_group_by_id(random_group.id)
@then('the new group list is equal to the old list without deleted group')  # and this is also a fixture
def verify_group_deleted(db, non_empty_group_list, random_group):
old_groups_list = non_empty_group_list
new_groups_list = db.get_group_list()
old_groups_list.remove(random_group)
assert sorted(old_groups_list, key=Group.id_or_max) == sorted(new_groups_list, key=Group.id_or_max)
# STEPS FOR MODIFY GROUP
@given('a new group with <new_name>, <new_header> and <new_footer>', target_fixture="new_group_for_modify")
def new_group_for_modify(new_name, new_header, new_footer):
return Group(group_name=new_name, group_header=new_header, group_footer=new_footer)
@when('I modify the group from the list')  # this is also a fixture
def modify_some_group(app, random_group, new_group_for_modify):
app.group.modify_group_by_id(random_group.id, new_group_for_modify)
@then('the new group list is equal to the old list with modify group')  # and this is also a fixture
def verify_group_modify(db, non_empty_group_list, random_group, new_group_for_modify):
old_groups_list = non_empty_group_list
new_groups_list = db.get_group_list()
res_old_groups = []
for i in range(len(old_groups_list)):
if str(old_groups_list[i].id) != str(random_group.id):
res_old_groups += [old_groups_list[i]]
if str(old_groups_list[i].id) == str(random_group.id):
res_old_groups += [new_group_for_modify]
assert res_old_groups == sorted(new_groups_list, key=Group.id_or_max)
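# Illustrative scenario outline (editorial addition): the wording below is an
# assumption that matches the step definitions above and would normally live
# in a separate .feature file:
#
#   Scenario Outline: Add a group
#     Given a group list
#     And a group with <name>, <header> and <footer>
#     When I add the group to the list
#     Then the new group list is equal to the old list with the added group
#
#     Examples:
#       | name | header | footer |
#       | g1   | h1     | f1     |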
|
# Generated by Django 3.1.4 on 2020-12-23 21:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blackbook', '0013_add_uuid_to_other_models'),
]
operations = [
migrations.AlterField(
model_name='transactionjournalentry',
name='budget',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='transactions', to='blackbook.budgetperiod'),
),
]
|
from typing import Union, Tuple
from torch_geometric.typing import OptTensor, OptPairTensor, Adj, Size
from torch import Tensor
from torch_sparse import SparseTensor, matmul
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.nn.conv import MessagePassing
class GraphConv(MessagePassing):
r"""The graph neural network operator from the `"Weisfeiler and Leman Go
Neural: Higher-order Graph Neural Networks"
<https://arxiv.org/abs/1810.02244>`_ paper
.. math::
\mathbf{x}^{\prime}_i = \mathbf{\Theta}_1 \mathbf{x}_i +
\mathbf{\Theta}_2 \sum_{j \in \mathcal{N}(i)} e_{j,i} \cdot
\mathbf{x}_j
where :math:`e_{j,i}` denotes the edge weight from source node :obj:`j` to
target node :obj:`i` (default: :obj:`1`)
Args:
in_channels (int or tuple): Size of each input sample, or :obj:`-1` to
derive the size from the first input(s) to the forward method.
A tuple corresponds to the sizes of source and target
dimensionalities.
out_channels (int): Size of each output sample.
aggr (string, optional): The aggregation scheme to use
(:obj:`"add"`, :obj:`"mean"`, :obj:`"max"`).
(default: :obj:`"add"`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
"""
def __init__(
self,
in_channels: Union[int, Tuple[int, int]],
out_channels: int,
aggr: str = 'add',
bias: bool = True,
**kwargs,
):
super(GraphConv, self).__init__(aggr=aggr, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
if isinstance(in_channels, int):
in_channels = (in_channels, in_channels)
self.lin_rel = Linear(in_channels[0], out_channels, bias=bias)
self.lin_root = Linear(in_channels[1], out_channels, bias=False)
self.reset_parameters()
def reset_parameters(self):
self.lin_rel.reset_parameters()
self.lin_root.reset_parameters()
def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj,
edge_weight: OptTensor = None, size: Size = None) -> Tensor:
""""""
if isinstance(x, Tensor):
x: OptPairTensor = (x, x)
# propagate_type: (x: OptPairTensor, edge_weight: OptTensor)
out = self.propagate(edge_index, x=x, edge_weight=edge_weight,
size=size)
out = self.lin_rel(out)
x_r = x[1]
if x_r is not None:
out += self.lin_root(x_r)
return out
def message(self, x_j: Tensor, edge_weight: OptTensor) -> Tensor:
return x_j if edge_weight is None else edge_weight.view(-1, 1) * x_j
def message_and_aggregate(self, adj_t: SparseTensor,
x: OptPairTensor) -> Tensor:
return matmul(adj_t, x[0], reduce=self.aggr)
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
self.out_channels)
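# Illustrative usage sketch (editorial addition, not part of the original
# module); assumes torch and torch_geometric are installed.
if __name__ == '__main__':
    import torch

    # A toy graph: 3 nodes with 4 features each and two directed edges
    # 0 -> 1 and 1 -> 2.
    x = torch.randn(3, 4)
    edge_index = torch.tensor([[0, 1], [1, 2]], dtype=torch.long)

    conv = GraphConv(in_channels=4, out_channels=8, aggr='add')
    out = conv(x, edge_index)
    print(out.shape)  # torch.Size([3, 8])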
|
from optparse import make_option
from django.core.management.base import BaseCommand
from apps.statistics.models import MStatistics
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
)
def handle(self, *args, **options):
MStatistics.collect_statistics()
|
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 Brendan Christy <brendan.christy@hs-rm.de>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform, VivadoProgrammer
from litex.build.openocd import OpenOCD
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk / Rst
("clk200", 0,
Subsignal("p", Pins("R4"), IOStandard("DIFF_SSTL15")),
Subsignal("n", Pins("T4"), IOStandard("DIFF_SSTL15"))
),
("clk125", 0,
Subsignal("p", Pins("F6"), IOStandard("DIFF_SSTL15")),
Subsignal("n", Pins("E6"), IOStandard("DIFF_SSTL15"))
),
("cpu_reset", 0, Pins("T6"), IOStandard("SSTL15")),
# DDR3 SDRAM
("ddram", 0,
Subsignal("a", Pins("AA4 AB2 AA5 AB5 AB1 U3 W1 T1 V2 U2 Y1 W2 Y2 U1 V3"), IOStandard("SSTL15")),
Subsignal("ba", Pins("AA3 Y3 Y4"), IOStandard("SSTL15")),
Subsignal("ras_n", Pins("V4"), IOStandard("SSTL15")),
Subsignal("cas_n", Pins("W4"), IOStandard("SSTL15")),
Subsignal("we_n", Pins("AA1"), IOStandard("SSTL15")),
Subsignal("dm", Pins("D2 G2 M2 M5"), IOStandard("SSTL15")),
Subsignal("dq", Pins("C2 G1 A1 F3 B2 F1 B1 E2 H3 G3 H2 H5 J1 J5 K1 H4 L4 M3 L3 J6 K3 K6 J4 L5 P1 N4 R1 N2 M6 N5 P6 P2"), IOStandard("SSTL15"), Misc("IN_TERM=UNTUNED_SPLIT_50")),
Subsignal("dqs_p", Pins("E1 K2 M1 P5"), IOStandard("DIFF_SSTL15"), Misc("IN_TERM=UNTUNED_SPLIT_50")),
Subsignal("dqs_n", Pins("D1 J2 L1 P4"), IOStandard("DIFF_SSTL15"), Misc("IN_TERM=UNTUNED_SPLIT_50")),
Subsignal("clk_p", Pins("R3"), IOStandard("DIFF_SSTL15")),
Subsignal("clk_n", Pins("R2"), IOStandard("DIFF_SSTL15")),
Subsignal("cke", Pins("T5"), IOStandard("SSTL15")),
Subsignal("odt", Pins("U5"), IOStandard("SSTL15")),
Subsignal("cs_n", Pins("AB3"), IOStandard("SSTL15")),
Subsignal("reset_n", Pins("W6"), IOStandard("LVCMOS15")),
Misc("SLEW=FAST"),
),
# UART
("serial", 0,
Subsignal("tx", Pins("AB15")),
Subsignal("rx", Pins("AA15")),
IOStandard("LVCMOS33"),
),
# GMII Ethernet
("eth_clocks_ext", 0,
Subsignal("tx", Pins("K21")),
Subsignal("gtx", Pins("G21")),
Subsignal("rx", Pins("K18")),
IOStandard("LVCMOS33")
),
("eth1_clocks_ext", 0,
Subsignal("tx", Pins("T14")),
Subsignal("gtx", Pins("M16")),
Subsignal("rx", Pins("J20")),
IOStandard("LVCMOS33")
),
("eth2_clocks_ext", 0,
Subsignal("tx", Pins("V10")),
Subsignal("gtx", Pins("AA21")),
Subsignal("rx", Pins("V13")),
IOStandard("LVCMOS33")
),
("eth3_clocks_ext", 0,
Subsignal("tx", Pins("U16")),
Subsignal("gtx", Pins("P20")),
Subsignal("rx", Pins("Y18")),
IOStandard("LVCMOS33")
),
("eth", 0,
Subsignal("rst_n", Pins("G20")),
Subsignal("int_n", Pins("D14"), Misc("KEEPER = TRUE")),
Subsignal("mdio", Pins("L16")),
Subsignal("mdc", Pins("J17")),
Subsignal("rx_dv", Pins("M22")),
Subsignal("rx_er", Pins("N18")),
Subsignal("rx_data", Pins("N22 H18 H17 M21 L21 N20 M20 N19")),
Subsignal("tx_en", Pins("G22")),
Subsignal("tx_er", Pins("K17")),
Subsignal("tx_data", Pins("D22 H20 H22 J22 K22 L19 K19 L20")),
Subsignal("col", Pins("M18")),
Subsignal("crs", Pins("L18")),
IOStandard("LVCMOS33")
),
("eth", 1,
Subsignal("rst_n", Pins("L14")),
Subsignal("int_n", Pins("E14"), Misc("KEEPER = TRUE")),
Subsignal("mdc", Pins("AB21")),
Subsignal("mdio", Pins("AB22")),
Subsignal("rx_dv", Pins("L13")),
Subsignal("rx_er", Pins("G13")),
Subsignal("rx_data", Pins("M13 K14 K13 J14 H14 H15 J15 H13")),
Subsignal("tx_en", Pins("M15")),
Subsignal("tx_er", Pins("T15")),
Subsignal("tx_data", Pins("L15 K16 W15 W16 V17 W17 U15 V15")),
Subsignal("col", Pins("J21")),
Subsignal("crs", Pins("E22")),
IOStandard("LVCMOS33")
),
("eth", 2,
Subsignal("rst_n", Pins("T20")),
Subsignal("int_n", Pins("E13"), Misc("KEEPER = TRUE")),
Subsignal("mdc", Pins("V20")),
Subsignal("mdio", Pins("V19")),
Subsignal("rx_dv", Pins("AA20")),
Subsignal("rx_er", Pins("U21")),
Subsignal("rx_data", Pins("AB20 AA19 AA18 AB18 Y17 W22 W21 T21")),
Subsignal("tx_en", Pins("V14")),
Subsignal("tx_er", Pins("AA9")),
Subsignal("tx_data", Pins("W11 W12 Y11 Y12 W10 AA11 AA10 AB10")),
Subsignal("col", Pins("Y21")),
Subsignal("crs", Pins("Y22")),
IOStandard("LVCMOS33")
),
("eth", 3,
Subsignal("rst_n", Pins("R16")),
Subsignal("int_n", Pins("F13")),
Subsignal("mdc", Pins("V18")),
Subsignal("mdio", Pins("U20")),
Subsignal("rx_dv", Pins("W20")),
Subsignal("rx_er", Pins("N13")),
Subsignal("rx_data", Pins("W19 Y19 V22 U22 T18 R18 R14 P14")),
Subsignal("tx_en", Pins("P16")),
Subsignal("tx_er", Pins("R19")),
Subsignal("tx_data", Pins("R17 P15 N17 P17 T16 U17 U18 P19")),
Subsignal("col", Pins("N14")),
Subsignal("crs", Pins("N15")),
IOStandard("LVCMOS33")
),
("spisdcard", 0,
Subsignal("clk", Pins("J16")),
Subsignal("mosi", Pins("A20"), Misc("PULLUP true")),
Subsignal("cs_n", Pins("B22"), Misc("PULLUP true")),
Subsignal("miso", Pins("F20"), Misc("PULLUP true")),
Misc("SLEW=FAST"),
IOStandard("LVCMOS33"),
),
("sdcard", 0,
Subsignal("data", Pins("F20 C22 B20 B22"), Misc("PULLUP true")),
Subsignal("cmd", Pins("A20"), Misc("PULLUP true")),
Subsignal("clk", Pins("J16")),
Subsignal("cd", Pins("F19")),
Misc("SLEW=FAST"),
IOStandard("LVCMOS33"),
)
]
_connectors = []
class Platform(XilinxPlatform):
default_clk_name = "clk200"
default_clk_period = 1e9/200e6
def __init__(self) -> None:
XilinxPlatform.__init__(self, "xc7a100t-fgg484-2", _io, _connectors, toolchain="vivado")
self.add_platform_command("set_property INTERNAL_VREF 0.750 [get_iobanks 34]")
self.add_platform_command("set_property INTERNAL_VREF 0.750 [get_iobanks 35]")
def create_programmer(self):
return OpenOCD("openocd_ax7101.cfg", "bscan_spi_xc7a100t.bit")
def do_finalize(self, fragment):
XilinxPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk200", loose=True), 1e9/200e6)
self.add_period_constraint(self.lookup_request("eth_clocks_ext:gtx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth_clocks_ext:tx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth_clocks_ext:rx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth1_clocks_ext:gtx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth1_clocks_ext:tx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth1_clocks_ext:rx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth2_clocks_ext:gtx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth2_clocks_ext:tx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth2_clocks_ext:rx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth3_clocks_ext:gtx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth3_clocks_ext:tx", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("eth3_clocks_ext:rx", loose=True), 1e9/125e6)
|
from functools import total_ordering
from django.db.migrations.state import ProjectState
from .exceptions import CircularDependencyError, NodeNotFoundError
@total_ordering
class Node:
"""
A single node in the migration graph. Contains direct links to adjacent
nodes in either direction.
"""
def __init__(self, key):
self.key = key
self.children = set()
self.parents = set()
def __eq__(self, other):
return self.key == other
def __lt__(self, other):
return self.key < other
def __hash__(self):
return hash(self.key)
def __getitem__(self, item):
return self.key[item]
def __str__(self):
return str(self.key)
def __repr__(self):
return '<%s: (%r, %r)>' % (self.__class__.__name__, self.key[0], self.key[1])
def add_child(self, child):
self.children.add(child)
def add_parent(self, parent):
self.parents.add(parent)
class DummyNode(Node):
"""
A node that doesn't correspond to a migration file on disk.
(A squashed migration that was removed, for example.)
After the migration graph is processed, all dummy nodes should be removed.
If there are any left, a nonexistent dependency error is raised.
"""
def __init__(self, key, origin, error_message):
super().__init__(key)
self.origin = origin
self.error_message = error_message
def raise_error(self):
raise NodeNotFoundError(self.error_message, self.key, origin=self.origin)
class MigrationGraph:
"""
Represent the digraph of all migrations in a project.
Each migration is a node, and each dependency is an edge. There are
no implicit dependencies between numbered migrations - the numbering is
merely a convention to aid file listing. Every new numbered migration
has a declared dependency to the previous number, meaning that VCS
branch merges can be detected and resolved.
Migrations files can be marked as replacing another set of migrations -
this is to support the "squash" feature. The graph handler isn't responsible
for these; instead, the code to load them in here should examine the
migration files and if the replaced migrations are all either unapplied
or not present, it should ignore the replaced ones, load in just the
replacing migration, and repoint any dependencies that pointed to the
replaced migrations to point to the replacing one.
A node should be a tuple: (app_path, migration_name). The tree special-cases
things within an app - namely, root nodes and leaf nodes ignore dependencies
to other apps.
"""
def __init__(self):
self.node_map = {}
self.nodes = {}
def add_node(self, key, migration):
assert key not in self.node_map
node = Node(key)
self.node_map[key] = node
self.nodes[key] = migration
def add_dummy_node(self, key, origin, error_message):
node = DummyNode(key, origin, error_message)
self.node_map[key] = node
self.nodes[key] = None
def add_dependency(self, migration, child, parent, skip_validation=False):
"""
This may create dummy nodes if they don't yet exist. If
`skip_validation=True`, validate_consistency() should be called
afterwards.
"""
if child not in self.nodes:
error_message = (
"Migration %s dependencies reference nonexistent"
" child node %r" % (migration, child)
)
self.add_dummy_node(child, migration, error_message)
if parent not in self.nodes:
error_message = (
"Migration %s dependencies reference nonexistent"
" parent node %r" % (migration, parent)
)
self.add_dummy_node(parent, migration, error_message)
self.node_map[child].add_parent(self.node_map[parent])
self.node_map[parent].add_child(self.node_map[child])
if not skip_validation:
self.validate_consistency()
def remove_replaced_nodes(self, replacement, replaced):
"""
Remove each of the `replaced` nodes (when they exist). Any
dependencies that were referencing them are changed to reference the
`replacement` node instead.
"""
# Cast list of replaced keys to set to speed up lookup later.
replaced = set(replaced)
try:
replacement_node = self.node_map[replacement]
except KeyError as err:
raise NodeNotFoundError(
"Unable to find replacement node %r. It was either never added"
" to the migration graph, or has been removed." % (replacement,),
replacement
) from err
for replaced_key in replaced:
self.nodes.pop(replaced_key, None)
replaced_node = self.node_map.pop(replaced_key, None)
if replaced_node:
for child in replaced_node.children:
child.parents.remove(replaced_node)
# We don't want to create dependencies between the replaced
# node and the replacement node as this would lead to
# self-referencing on the replacement node at a later iteration.
if child.key not in replaced:
replacement_node.add_child(child)
child.add_parent(replacement_node)
for parent in replaced_node.parents:
parent.children.remove(replaced_node)
# Again, to avoid self-referencing.
if parent.key not in replaced:
replacement_node.add_parent(parent)
parent.add_child(replacement_node)
def remove_replacement_node(self, replacement, replaced):
"""
The inverse operation to `remove_replaced_nodes`. Almost. Remove the
replacement node `replacement` and remap its child nodes to `replaced`
- the list of nodes it would have replaced. Don't remap its parent
nodes as they are expected to be correct already.
"""
self.nodes.pop(replacement, None)
try:
replacement_node = self.node_map.pop(replacement)
except KeyError as err:
raise NodeNotFoundError(
"Unable to remove replacement node %r. It was either never added"
" to the migration graph, or has been removed already." % (replacement,),
replacement
) from err
replaced_nodes = set()
replaced_nodes_parents = set()
for key in replaced:
replaced_node = self.node_map.get(key)
if replaced_node:
replaced_nodes.add(replaced_node)
replaced_nodes_parents |= replaced_node.parents
# We're only interested in the latest replaced node, so filter out
# replaced nodes that are parents of other replaced nodes.
replaced_nodes -= replaced_nodes_parents
for child in replacement_node.children:
child.parents.remove(replacement_node)
for replaced_node in replaced_nodes:
replaced_node.add_child(child)
child.add_parent(replaced_node)
for parent in replacement_node.parents:
parent.children.remove(replacement_node)
# NOTE: There is no need to remap parent dependencies as we can
# assume the replaced nodes already have the correct ancestry.
def validate_consistency(self):
"""Ensure there are no dummy nodes remaining in the graph."""
[n.raise_error() for n in self.node_map.values() if isinstance(n, DummyNode)]
def forwards_plan(self, target):
"""
Given a node, return a list of which previous nodes (dependencies) must
be applied, ending with the node itself. This is the list you would
follow if applying the migrations to a database.
"""
if target not in self.nodes:
raise NodeNotFoundError("Node %r not a valid node" % (target,), target)
return self.iterative_dfs(self.node_map[target])
def backwards_plan(self, target):
"""
Given a node, return a list of which dependent nodes (dependencies)
must be unapplied, ending with the node itself. This is the list you
would follow if removing the migrations from a database.
"""
if target not in self.nodes:
raise NodeNotFoundError("Node %r not a valid node" % (target,), target)
return self.iterative_dfs(self.node_map[target], forwards=False)
def iterative_dfs(self, start, forwards=True):
"""Iterative depth-first search for finding dependencies."""
visited = []
visited_set = set()
stack = [(start, False)]
while stack:
node, processed = stack.pop()
if node in visited_set:
pass
elif processed:
visited_set.add(node)
visited.append(node.key)
else:
stack.append((node, True))
stack += [(n, False) for n in sorted(node.parents if forwards else node.children)]
return visited
def root_nodes(self, app=None):
"""
Return all root nodes - that is, nodes with no dependencies inside
their app. These are the starting point for an app.
"""
roots = set()
for node in self.nodes:
if all(key[0] != node[0] for key in self.node_map[node].parents) and (not app or app == node[0]):
roots.add(node)
return sorted(roots)
def leaf_nodes(self, app=None):
"""
Return all leaf nodes - that is, nodes with no dependents in their app.
These are the "most current" version of an app's schema.
Having more than one per app is technically an error, but one that
gets handled further up, in the interactive command - it's usually the
result of a VCS merge and needs some user input.
"""
leaves = set()
for node in self.nodes:
if all(key[0] != node[0] for key in self.node_map[node].children) and (not app or app == node[0]):
leaves.add(node)
return sorted(leaves)
def ensure_not_cyclic(self):
# Algo from GvR:
# https://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html
todo = set(self.nodes)
while todo:
node = todo.pop()
stack = [node]
while stack:
top = stack[-1]
for child in self.node_map[top].children:
# Use child.key instead of child to speed up the frequent
# hashing.
node = child.key
if node in stack:
cycle = stack[stack.index(node):]
raise CircularDependencyError(", ".join("%s.%s" % n for n in cycle))
if node in todo:
stack.append(node)
todo.remove(node)
break
else:
node = stack.pop()
def __str__(self):
return 'Graph: %s nodes, %s edges' % self._nodes_and_edges()
def __repr__(self):
nodes, edges = self._nodes_and_edges()
return '<%s: nodes=%s, edges=%s>' % (self.__class__.__name__, nodes, edges)
def _nodes_and_edges(self):
return len(self.nodes), sum(len(node.parents) for node in self.node_map.values())
def _generate_plan(self, nodes, at_end):
plan = []
for node in nodes:
for migration in self.forwards_plan(node):
if migration not in plan and (at_end or migration not in nodes):
plan.append(migration)
return plan
def make_state(self, nodes=None, at_end=True, real_apps=None):
"""
Given a migration node or nodes, return a complete ProjectState for it.
If at_end is False, return the state before the migration has run.
If nodes is not provided, return the overall most current project state.
"""
if nodes is None:
nodes = list(self.leaf_nodes())
if not nodes:
return ProjectState()
if not isinstance(nodes[0], tuple):
nodes = [nodes]
plan = self._generate_plan(nodes, at_end)
project_state = ProjectState(real_apps=real_apps)
for node in plan:
project_state = self.nodes[node].mutate_state(project_state, preserve=False)
return project_state
def __contains__(self, node):
return node in self.nodes
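# A minimal usage sketch (not part of the original module; the app and
# migration names below are hypothetical). Nodes are (app_label, migration_name)
# tuples, edges are declared dependencies, and forwards_plan() returns the
# order in which migrations would be applied.
if __name__ == '__main__':
    graph = MigrationGraph()
    graph.add_node(('app_a', '0001_initial'), None)
    graph.add_node(('app_a', '0002_change'), None)
    graph.add_node(('app_b', '0001_initial'), None)
    graph.add_dependency('app_a.0002_change', ('app_a', '0002_change'), ('app_a', '0001_initial'))
    graph.add_dependency('app_b.0001_initial', ('app_b', '0001_initial'), ('app_a', '0001_initial'))
    # expected: [('app_a', '0001_initial'), ('app_a', '0002_change')]
    print(graph.forwards_plan(('app_a', '0002_change')))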
|
# compare contents of two files in binary form
import sys
def compareFile(srcFile,destFile):
with open(srcFile,"rb") as src:
srcData = src.read()
with open(destFile,"rb") as dest:
destData = dest.read()
checked = False
if(len(srcData)!=len(destData)):
print("It unequal between ",srcFile,destFile,". The file size is different")
checked = True
for i in range(min(len(srcData),len(destData))):
if(srcData[i] != destData[i]):
print("unequal index:%d, modleDatata:%d, flashData:%d " % (i,srcData[i],destData[i]))
checked = True
if checked:
print('Check Result: unequal')
else:
print('Check Result: equal')
def main():
if(len(sys.argv) !=3 ):
        print('Wrong parameters: expected two file paths')
return
compareFile(sys.argv[1],sys.argv[2])
if __name__ == '__main__':
main()
|
import math
import itertools
digits = []
def search():
for perm in itertools.combinations(digits, 6):
total = 0.0
for x in perm:
total += (1 / x)
if total > 1.0:
break
if total == 1.0:
print('Solution: ' + str(perm))
return True
return False
max_digit = 6
while True:
digits = []
for i in range(1, max_digit + 1):
digits.append(i)
print('Max Digit: ' + str(max_digit))
if search():
break
max_digit += 1
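# A hedged variant of the same brute-force search (not part of the original
# script): exact rational arithmetic via fractions.Fraction avoids relying on
# the floating-point equality test against 1.0 used above.
from fractions import Fraction
def search_exact(max_digit, k=6):
    for perm in itertools.combinations(range(1, max_digit + 1), k):
        # exact sum of reciprocals, no rounding error
        if sum(Fraction(1, x) for x in perm) == 1:
            return perm
    return None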
|
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import json
import os
import pprint
from abc import ABCMeta
# HACK: to avoid an error on import if leveldict is not installed
try:
from leveldict import LevelDictSerialized
except ImportError as e:
# use type and not metaclass because of the singleton
LevelDictSerialized = type
from lib.common.oopatterns import ParametricSingletonMetaClass
log = logging.getLogger(__name__)
# =============
# Serializers
# =============
class NSRLSerializer(object):
fields = None
@classmethod
def loads(cls, value):
value = json.loads(value)
if isinstance(value[0], list):
result = [dict((field, col[index])
for index, field in
enumerate(cls.fields)) for col in value]
else:
result = dict((field, value[index])
for index, field in
enumerate(cls.fields))
return result
@classmethod
def dumps(cls, value):
def detect_charset(string):
import chardet
return chardet.detect(string)['encoding']
try:
if isinstance(value, list):
result = json.dumps([[row.get(key) for key in cls.fields] for row in value])
else:
result = json.dumps([value.get(col) for col in cls.fields])
except Exception:
if isinstance(value, list):
for row in value:
for colkey, colval in list(row.items()):
if not isinstance(colval, str):
charset = detect_charset(colval)
if charset is None:
charset = 'unicode-escape'
try:
row[colkey] = colval.decode(charset)
except:
row[colkey] = colval.decode('unicode-escape')
result = json.dumps([[row.get(key) for key in cls.fields] for row in value])
else:
for colkey, colval in list(value.items()):
if not isinstance(colval, str):
# try to use chardet to find encoding
charset = detect_charset(colval)
if charset is None:
charset = 'unicode-escape'
try:
value[colkey] = colval.decode(charset)
except:
# treat false positive from chardet
value[colkey] = colval.decode('unicode-escape')
result = json.dumps([value.get(col) for col in cls.fields])
return result
class NSRLOsSerializer(NSRLSerializer):
fields = ['OpSystemVersion', 'OpSystemName', 'MfgCode']
class NSRLFileSerializer(NSRLSerializer):
fields = ["MD5", "CRC32", "FileName", "FileSize",
"ProductCode", "OpSystemCode", "SpecialCode"]
@classmethod
def dumps(cls, value):
def detect_charset(string):
import chardet
return chardet.detect(string)['encoding']
try:
if isinstance(value, list):
result = json.dumps([[row.get(key) for key in cls.fields] for row in value])
else:
result = json.dumps([value.get(col) for col in cls.fields])
except Exception:
# failed to json it, bruteforce encoding
if isinstance(value, list):
for row in value:
fname = row['FileName']
if not isinstance(fname, str):
charset = detect_charset(fname)
charset = 'unicode-escape' if not charset else charset
try:
row['FileName'] = fname.decode(charset)
except:
# treat false positive from chardet
row['FileName'] = fname.decode('unicode-escape')
result = json.dumps([[col.get(key) for key in cls.fields] for col in value])
else:
fname = value['FileName']
if not isinstance(fname, str):
# try to use chardet to find encoding
charset = detect_charset(fname)
charset = 'unicode-escape' if not charset else charset
try:
value['FileName'] = fname.decode(charset)
except:
# treat false positive from chardet
value['FileName'] = fname.decode('unicode-escape')
result = json.dumps([value.get(col) for col in cls.fields])
return result
class NSRLManufacturerSerializer(NSRLSerializer):
fields = ["MfgName"]
class NSRLProductSerializer(NSRLSerializer):
fields = ["ProductName", "ProductVersion", "OpSystemCode",
"MfgCode", "Language", "ApplicationType"]
# ==============
# NSRL records
# ==============
# Hack to avoid metaclass conflicts
class LevelDBSingletonMetaClass(ABCMeta, ParametricSingletonMetaClass):
pass
LevelDBSingleton = LevelDBSingletonMetaClass('LevelDBSingleton', (object,), {})
class NSRLLevelDict(LevelDictSerialized, LevelDBSingleton):
key = None
@staticmethod
def depends_on(cls, *args, **kwargs):
# singleton depends on the uri parameter
(db,) = args[0]
return os.path.abspath(db)
def __init__(self, db, serializer=json, **kwargs):
super(NSRLLevelDict, self).__init__(db, serializer, **kwargs)
@classmethod
def create_database(cls, dbfile, records, **kwargs):
# import specific modules
from csv import DictReader
log_threshold = 50000
# create database
db = cls(dbfile, **kwargs)
# open csv files
csv_file = open(records, 'r')
csv_entries = DictReader(csv_file)
for index, row in enumerate(csv_entries):
key = row.pop(cls.key)
value = db.get(key, None)
if not value:
db[key] = row
else:
if isinstance(value, dict):
db[key] = [value, row]
else:
# db[key].append([row]) is not possible as changes are only
# made in memory and __setitem__ is never called
db[key] = value + [row]
if (index % log_threshold) == 0:
print(("Current progress: {0}".format(index)))
return db
# ==================
# NSRL File Record
# ==================
class NSRLFile(NSRLLevelDict):
key = "SHA-1"
def __init__(self, db, **kwargs):
super(NSRLFile, self).__init__(db, NSRLFileSerializer, **kwargs)
# =================
# NSRL OS Record
# =================
class NSRLOs(NSRLLevelDict):
key = "OpSystemCode"
def __init__(self, db, **kwargs):
super(NSRLOs, self).__init__(db,
NSRLOsSerializer,
**kwargs)
# ================
# NSRL OS Record
# ================
class NSRLManufacturer(NSRLLevelDict):
key = "MfgCode"
def __init__(self, db, **kwargs):
super(NSRLManufacturer, self).__init__(db,
NSRLManufacturerSerializer,
**kwargs)
# =====================
# NSRL Product Record
# =====================
class NSRLProduct(NSRLLevelDict):
key = "ProductCode"
def __init__(self, db, **kwargs):
super(NSRLProduct, self).__init__(db,
NSRLProductSerializer,
**kwargs)
# =============
# NSRL module
# =============
class NSRL(object):
def __init__(self,
nsrl_file,
nsrl_product,
nsrl_os, nsrl_manufacturer,
**kwargs):
        # TODO: database paths should be specified explicitly in the constructor;
        # they are temporarily passed via kwargs
self.nsrl_file = NSRLFile(nsrl_file)
self.nsrl_product = NSRLProduct(nsrl_product)
self.nsrl_os = NSRLOs(nsrl_os)
self.nsrl_manufacturer = NSRLManufacturer(nsrl_manufacturer)
def _lookup_file(self, sha1sum):
return self.nsrl_file[sha1sum]
def _lookup_product(self, product_code):
return self.nsrl_product[product_code]
def _lookup_os(self, op_system_code):
return self.nsrl_os[op_system_code]
def _lookup_manufacturer(self, manufacturer_code):
return self.nsrl_manufacturer[manufacturer_code]
def lookup_by_sha1(self, sha1sum):
operations = [
(sha1sum, 'SHA-1', self.nsrl_file, None),
(None, 'ProductCode', self.nsrl_product, 'SHA-1'),
(None, 'OpSystemCode', self.nsrl_os, 'SHA-1'),
(None, 'MfgCode', self.nsrl_manufacturer, 'ProductCode')
]
entries = dict((name, {}) for (_, name, _, _) in operations)
try:
for value, key, database, where in operations:
if value:
entries[key][value] = database[value]
else:
subkeys = set()
for subkey, subitem in list(entries[where].items()):
if not isinstance(subitem, list):
subitem = [subitem]
subkeys.update([x[key] for x in subitem])
for subkey in subkeys:
entries[key][subkey] = database[subkey]
except:
pass
return entries
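# A hedged usage sketch (the database paths below are hypothetical):
# lookup_by_sha1() returns one sub-dict per record type, keyed by the codes
# resolved along the way, e.g.
#
#   nsrl = NSRL('leveldb/file', 'leveldb/product', 'leveldb/os', 'leveldb/manufacturer')
#   info = nsrl.lookup_by_sha1('0000004DA6391F7F5D2F7FCCF36CEBDA60C6EA02')
#   # info == {'SHA-1': {...}, 'ProductCode': {...},
#   #          'OpSystemCode': {...}, 'MfgCode': {...}}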
##############################################################################
# CLI for debug purposes
##############################################################################
if __name__ == '__main__':
##########################################################################
# local import
##########################################################################
import argparse
##########################################################################
# defined functions
##########################################################################
nsrl_databases = {
'file': NSRLFile,
'os': NSRLOs,
'manufacturer': NSRLManufacturer,
'product': NSRLProduct,
}
def nsrl_create_database(**kwargs):
database_type = kwargs['type']
nsrl_databases[database_type].create_database(kwargs['database'],
kwargs['filename'])
def nsrl_get(**kwargs):
database_type = kwargs['type']
database = nsrl_databases[database_type](kwargs['database'],
block_cache_size=1 << 30,
max_open_files=3000)
value = database.get(kwargs['key'])
print(("key {0}: value {1}".format(kwargs['key'], value)))
def nsrl_resolve(**kwargs):
        # TODO: handle the NSRL object in a better way
kwargs['nsrl_file_db'] = kwargs['file']
kwargs['nsrl_prod_db'] = kwargs['product']
kwargs['nsrl_os_db'] = kwargs['os']
kwargs['nsrl_mfg_db'] = kwargs['manufacturer']
handle = NSRL(**kwargs)
        print(pprint.pformat(handle.lookup_by_sha1(kwargs['sha1'])))
##########################################################################
# arguments
##########################################################################
# define command line arguments
desc_msg = 'NSRL database module CLI mode'
parser = argparse.ArgumentParser(description=desc_msg)
parser.add_argument('-v',
'--verbose',
action='count',
default=0)
subparsers = parser.add_subparsers(help='sub-command help')
# create the create parser
help_msg = 'create NSRL records into a database'
create_parser = subparsers.add_parser('create',
help=help_msg)
create_parser.add_argument('-t',
'--type',
type=str,
choices=['file', 'os',
'manufacturer', 'product'],
help='type of the record')
create_parser.add_argument('filename',
type=str,
help='filename of the NSRL record')
create_parser.add_argument('database',
type=str,
help='database to store NSRL records')
create_parser.set_defaults(func=nsrl_create_database)
# create the scan parser
get_parser = subparsers.add_parser('get',
help='get the entry from database')
get_parser.add_argument('-t',
'--type',
type=str,
choices=['file', 'os', 'manufacturer', 'product'],
help='type of the record')
get_parser.add_argument('database',
type=str,
help='database to read NSRL records')
get_parser.add_argument('key',
type=str,
                            help='key to retrieve')
get_parser.set_defaults(func=nsrl_get)
# create the scan parser
get_parser = subparsers.add_parser('resolve',
help='resolve from sha1')
get_parser.add_argument('file',
type=str,
help='filename for file records')
get_parser.add_argument('product',
type=str,
help='filename for product records')
get_parser.add_argument('os',
type=str,
help='filename for os records')
get_parser.add_argument('manufacturer',
type=str,
help='filename for manufacturer records')
get_parser.add_argument('sha1',
type=str,
help='sha1 to lookup')
get_parser.set_defaults(func=nsrl_resolve)
args = parser.parse_args()
# set verbosity
if args.verbose == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbose == 2:
logging.basicConfig(level=logging.DEBUG)
args = vars(parser.parse_args())
func = args.pop('func')
# with 'func' removed, args is now a kwargs
# with only the specific arguments
# for each subfunction useful for interactive mode.
func(**args)
|
"""Chartexchange view"""
__docformat__ = "numpy"
import os
from tabulate import tabulate
from gamestonk_terminal.helper_funcs import export_data
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.options import chartexchange_model
def display_raw(
export: str, ticker: str, date: str, call: bool, price: str, num: int = 20
) -> None:
"""Return raw stock data[chartexchange]
Parameters
----------
export : str
Export data as CSV, JSON, XLSX
ticker : str
Ticker for the given option
date : str
Date of expiration for the option
call : bool
        Whether the option is a call (True) or a put (False)
    price : str
        The strike price of the option
num : int
Number of rows to show
"""
df = chartexchange_model.get_option_history(ticker, date, call, price)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"hist",
df,
)
if gtff.USE_TABULATE_DF:
print(
tabulate(
df.head(num),
headers=df.columns,
tablefmt="fancy_grid",
showindex=True,
floatfmt=".2f",
)
)
else:
print(df.to_string(index=False))
print("")
|
# coding: utf-8
# ----------------------------------------------------------------------------
# <copyright company="Aspose" file="ai_bcr_parse_request.py">
# Copyright (c) 2018-2020 Aspose Pty Ltd. All rights reserved.
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </summary>
# ----------------------------------------------------------------------------
from AsposeEmailCloudSdk.models import *
class AiBcrParseRequest(object):
"""
Request model for ai_bcr_parse operation.
Initializes a new instance.
:param file: File to parse
:type file: str
:param countries: Comma-separated codes of countries.
:type countries: str
:param languages: Comma-separated ISO-639 codes of languages (either 639-1 or 639-3; i.e. \"it\" or \"ita\" for Italian); it's \"\" by default.
:type languages: str
:param is_single: Determines that image contains single VCard or more.
:type is_single: bool
"""
def __init__(self, file: str, countries: str = None, languages: str = None, is_single: bool = None):
"""
Request model for ai_bcr_parse operation.
Initializes a new instance.
:param file: File to parse
:type file: str
:param countries: Comma-separated codes of countries.
:type countries: str
:param languages: Comma-separated ISO-639 codes of languages (either 639-1 or 639-3; i.e. \"it\" or \"ita\" for Italian); it's \"\" by default.
:type languages: str
:param is_single: Determines that image contains single VCard or more.
:type is_single: bool
"""
self.file = file
self.countries = countries
self.languages = languages
self.is_single = is_single
|
from typing import Dict
from typing import List
import numpy
from fbsrankings.domain.model.affiliation import Subdivision
from fbsrankings.domain.model.game import Game
from fbsrankings.domain.model.game import GameStatus
from fbsrankings.domain.model.ranking import Ranking
from fbsrankings.domain.model.ranking import SeasonData
from fbsrankings.domain.model.ranking import TeamRankingRepository
from fbsrankings.domain.model.ranking import TeamRankingService
from fbsrankings.domain.model.team import TeamID
class TeamData:
def __init__(self, index: int) -> None:
self.index = index
self.game_total = 0
self.point_margin = 0
def add_game(self, point_margin: int) -> None:
self.game_total += 1
self.point_margin += point_margin
class SRSRankingService(TeamRankingService):
name: str = "SRS"
def __init__(self, repository: TeamRankingRepository) -> None:
self._repository = repository
def calculate_for_season(self, season_data: SeasonData) -> List[Ranking[TeamID]]:
team_data: Dict[TeamID, TeamData] = {}
for affiliation in season_data.affiliation_map.values():
if affiliation.subdivision == Subdivision.FBS:
team_data[affiliation.team_id] = TeamData(len(team_data))
season_is_complete = True
games_by_week: Dict[int, List[Game]] = {}
for game in season_data.game_map.values():
winning_data = None
if game.winning_team_id is not None:
winning_data = team_data.get(game.winning_team_id)
losing_data = None
if game.losing_team_id is not None:
losing_data = team_data.get(game.losing_team_id)
if winning_data is not None and losing_data is not None:
week_games = games_by_week.setdefault(game.week, [])
week_games.append(game)
elif game.status == GameStatus.SCHEDULED:
season_is_complete = False
n = len(team_data)
a = numpy.zeros((n + 1, n))
b = numpy.zeros(n + 1)
rankings = []
for week in sorted(games_by_week.keys()):
for game in games_by_week[week]:
if (
game.home_team_score is not None
and game.away_team_score is not None
):
home_data = team_data[game.home_team_id]
away_data = team_data[game.away_team_id]
home_margin = self._adjust_margin(
game.home_team_score - game.away_team_score,
)
home_data.add_game(home_margin)
away_data.add_game(-home_margin)
a[home_data.index, away_data.index] -= 1.0
a[away_data.index, home_data.index] -= 1.0
for data in team_data.values():
a[data.index, data.index] = data.game_total
b[data.index] = data.point_margin
a[n, data.index] = 1.0
b[n] = 0.0
x = numpy.linalg.lstsq(a, b, rcond=-1)[0]
result = {id_: x[data.index] for id_, data in team_data.items()}
ranking_values = TeamRankingService._to_values(season_data, result)
rankings.append(
self._repository.create(
SRSRankingService.name,
season_data.season.id_,
week,
ranking_values,
),
)
if season_is_complete:
rankings.append(
self._repository.create(
SRSRankingService.name,
season_data.season.id_,
None,
ranking_values,
),
)
return rankings
@staticmethod
def _adjust_margin(margin: int) -> int:
if margin > 24:
return 24
if margin < -24:
return -24
if 0 < margin < 7:
return 7
if 0 > margin > -7:
return -7
return margin
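# A short explanatory note (not part of the original module): for each FBS team
# i, the loop above builds one least-squares equation of the form
#     games_i * rating_i - sum(rating_j over opponents, one term per game) = total_point_margin_i
# plus one extra row enforcing sum(rating_i) = 0 so the ratings are centred on
# zero; numpy.linalg.lstsq then solves the system after each week of games.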
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
"requests-futures",
"xmltodict",
"PyYAML"
]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest', ]
setup(
author="Shane Donohoe",
author_email='shane@donohoe.cc',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Output youtube subscriptions using subscription_manager file",
install_requires=requirements,
entry_points={
"console_scripts": [
'youtube_sm_parser = youtube_sm_parser.youtube_sm_parser:main'
]
},
license="Apache Software License 2.0",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='youtube_sm_parser',
name='youtube_sm_parser',
packages=find_packages(include=['youtube_sm_parser']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/shanedabes/youtube_sm_parser',
version='0.1.5',
zip_safe=False,
)
|
from django.conf.urls import url
from rest_framework.routers import SimpleRouter, Route
class DiscoveryAPIRouter(SimpleRouter):
routes = [
# List route.
Route(
url=r'^{prefix}{trailing_slash}$',
mapping={
'get': 'list'
},
name='{basename}-list',
initkwargs={'suffix': 'List'}
),
# Detail route.
Route(
url=r'^{prefix}/{lookup}{trailing_slash}$',
mapping={
'get': 'retrieve'
},
name='{basename}-detail',
initkwargs={'suffix': 'Instance'}
),
# Values route.
Route(
url=r'^{prefix}/values/{field_lookup}{trailing_slash}$',
mapping={
'get': 'values'
},
name='{basename}-values',
initkwargs={'suffix': 'Values'}
),
# Count route.
Route(
url=r'^{prefix}/count/{field_lookup}{trailing_slash}$',
mapping={
'get': 'count'
},
name='{basename}-count',
initkwargs={'suffix': 'Count'}
)
]
def __init__(self):
self.trailing_slash = '/?'
super(SimpleRouter, self).__init__()
def get_field_lookup_regex(self, viewset, lookup_prefix=''):
base_regex = '(?P<{lookup_prefix}field_lookup>{lookup_value})'
lookup_value = getattr(viewset, 'lookup_value_regex', '[^/.]+')
return base_regex.format(
lookup_prefix=lookup_prefix,
lookup_value=lookup_value
)
def get_urls(self):
"""
Use the registered viewsets to generate a list of URL patterns.
"""
ret = []
for prefix, viewset, basename in self.registry:
lookup = self.get_lookup_regex(viewset)
field_lookup = self.get_field_lookup_regex(viewset)
routes = self.get_routes(viewset)
for route in routes:
mapping = self.get_method_map(viewset, route.mapping)
if not mapping:
continue
regex = route.url.format(
prefix=prefix,
lookup=lookup,
field_lookup=field_lookup,
trailing_slash=self.trailing_slash
)
if not prefix and regex[:2] == '^/':
regex = '^' + regex[2:]
initkwargs = route.initkwargs.copy()
initkwargs.update({
'basename': basename,
})
view = viewset.as_view(mapping, **initkwargs)
name = route.name.format(basename=basename)
ret.append(url(regex, view, name=name))
return ret
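# A hedged usage sketch (the viewset and prefix names are hypothetical):
# registering a viewset with this router exposes the extra "values" and "count"
# routes in addition to the usual list/detail routes, e.g.
#
#   router = DiscoveryAPIRouter()
#   router.register(r'awards', AwardViewSet)
#   urlpatterns = router.urls
#   # ^awards/?$                                   {basename}-list
#   # ^awards/(?P<pk>[^/.]+)/?$                    {basename}-detail
#   # ^awards/values/(?P<field_lookup>[^/.]+)/?$   {basename}-values
#   # ^awards/count/(?P<field_lookup>[^/.]+)/?$    {basename}-count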
|
import tracc
import pandas as pd
import numpy as np
class costs:
def __init__(self,
travelcosts_df,
columns = None
):
"""
Inputs data and prunes columns if desired
"""
if columns is not None:
self.data = travelcosts_df[columns]
else:
self.data = travelcosts_df
def intrazonal(self,
cost_column,
origin_column,
destination_column,
method = "constant",
value = 0,
polygon_file = None,
polygon_id = None
):
"""
        Computes and updates intrazonal travel costs in a travel cost matrix. The output includes a travel cost from every origin or destination location in the matrix to itself.
Parameters
----------
cost_column : column name for travel costs
origin_column : column name for origin IDs
        destination_column : column name for destination IDs
method : "constant" applies a single @value to all intrazonal travel costs. "radius" applies a cost which is proportional to the radius of a circle with the same area as its input polygon
value : parameters for the method
polygon_file : file path to an input spatial polygon (e.g. geojson) if needed (it is for method = "radius")
polygon_id : ID field for the polygon_file needed for joining to the cost matrix
"""
# making sure ID columns are strings for a merge later on
self.data[origin_column] = self.data[origin_column].astype(str)
self.data[destination_column] = self.data[destination_column].astype(str)
# getting set of unique locations in the dataset
locations = list(self.data[origin_column].unique()) + list(self.data[destination_column].unique())
locations = list(set(locations))
if method == "constant":
new_times = [value] * len(locations)
df = pd.DataFrame(
list(zip(locations, locations, new_times)),
columns =[origin_column, destination_column, cost_column + "_i"])
elif method == "radius":
from tracc.spatial import radius
            # compute based on the equivalent radius of each polygon
df = radius(polygon_file,polygon_id)
df[origin_column] = df[polygon_id]
df[destination_column] = df[polygon_id]
del df[polygon_id]
df[cost_column + "_i"] = value * df["radius"]
del df["radius"]
else:
raise Exception("Method can only be 'constant' or 'radius'")
df[origin_column] = df[origin_column].astype(str)
df[destination_column] = df[destination_column].astype(str)
# join in the newly created intrazonal travel times
self.data = pd.merge(self.data, df, how='outer', left_on=[origin_column, destination_column], right_on = [origin_column, destination_column])
# replace the older intrazonal travel times
self.data[cost_column] = np.where((self.data[cost_column + "_i"] >= 0),self.data[cost_column + "_i"],self.data[cost_column])
del self.data[cost_column + "_i"]
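    # A hedged usage sketch for intrazonal() (column and file names below are
    # hypothetical): either a flat 5-minute cost for every zone-to-itself pair,
    # or a cost proportional to each zone's equivalent circle radius:
    #
    #   tc = costs(times_df)
    #   tc.intrazonal("time", "origin_id", "destination_id", method="constant", value=5)
    #   tc.intrazonal("time", "origin_id", "destination_id", method="radius",
    #                 value=0.5, polygon_file="zones.geojson", polygon_id="zone_id")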
def fill_missing_costs(
self,
cost_column,
origin_column,
destination_column,
spatial_file_path,
spatial_file_id,
where = "origin",
weight_type = "Queen"
):
"""
        Completes an OD matrix by filling in locations that were missing from the original matrix, based on a neighbourhood spatial weights matrix. For example, if an origin zone has no travel costs, its travel costs to each destination are assumed to be the average of the corresponding costs of its neighbouring zones.
"""
from tracc.spatial import area
# get list of zones which are missing from the input costs table
dfz = area(spatial_file_path, spatial_file_id)
dfz[spatial_file_id] = dfz[spatial_file_id].astype(str)
self.data[origin_column] = self.data[origin_column].astype(str)
li1 = list(self.data[origin_column].unique())
li2 = list(dfz[spatial_file_id].unique())
missing = [x for x in li2 if x not in li1]
del li1,li2
if len(missing) == 0:
return None
if where == "origin":
# get neighbours for each missing zone
from tracc.spatial import get_neighbours
neighbours = get_neighbours(spatial_file_path, "Queen", spatial_file_id)
new_times = []
# for each zone, compute average travel times to other zones based on neighbours
for location in missing:
locneigh = neighbours[location]
temp = self.data[self.data[origin_column].isin(locneigh)]
temp = pd.DataFrame(temp.groupby([destination_column], as_index=False)[cost_column].mean())
temp[origin_column] = location
new_times.append(temp)
# combine the outputs, and concat to the input times
new_times = pd.concat(new_times)
self.data = pd.concat([self.data, new_times])
elif where == "destination":
# get neighbours for each missing zone
from tracc.spatial import get_neighbours
neighbours = get_neighbours(spatial_file_path, "Queen", spatial_file_id)
new_times = []
# for each zone, compute average travel times from other zones based on neighbours
for location in missing:
locneigh = neighbours[location]
temp = self.data[self.data[destination_column].isin(locneigh)]
temp = pd.DataFrame(temp.groupby([origin_column], as_index=False)[cost_column].mean())
temp[destination_column] = location
new_times.append(temp)
# combine the outputs, and concat to the input times
new_times = pd.concat(new_times)
self.data = pd.concat([self.data, new_times])
else:
raise Exception("Input paramater @where should either be 'origin' or 'destination'")
def generalized_cost(
self,
columns,
coefficients,
exponents = None,
prune_output = True,
output_cost_name = "GC"
):
"""
Computes generalized costs
"""
# need to add a column check warning, and make the intercept = 0 if none is provided
# set all exponents as 1 if none are inputted
if exponents is None:
exponents = [1] * len(columns)
# compute the generalized cost value
self.data[output_cost_name] = coefficients[len(coefficients) - 1]
i = 0
while i < len(columns):
self.data[output_cost_name] = self.data[output_cost_name] + coefficients[i] * self.data[columns[i]] ** exponents[i]
i += 1
# delete initital cost columns if desired
if prune_output is True:
for col in list(set(columns)):
del self.data[col]
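    # The value computed above is
    #   GC = coefficients[0]*columns[0]**exponents[0] + ... + coefficients[-1]
    # i.e. the last coefficient acts as the intercept. A hedged call sketch
    # (column names are hypothetical):
    #
    #   tc.generalized_cost(columns=["time", "fare"], coefficients=[1.0, 3.0, 0.0])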
def impedence_calc(
self,
cost_column,
impedence_func,
impedence_func_params,
prune_output = False,
output_col_name = "fCij"
):
"""
        Measures impedence given an input travel cost and the selected impedence function and parameters
        # To Do: add in more impedence function options
"""
if impedence_func == "cumulative":
self.data[output_col_name] = self.data[cost_column].apply(tracc.decay.cumulative,args = (impedence_func_params,))
elif impedence_func == "linear":
self.data[output_col_name] = self.data[cost_column].apply(tracc.decay.linear,args = (impedence_func_params,))
elif impedence_func == "exponential":
self.data[output_col_name] = self.data[cost_column].apply(tracc.decay.exponential,args = (impedence_func_params,))
else:
raise Exception("Please select an appropriate decay function")
if prune_output is True:
del self.data[cost_column]
def impedence_combine(self,
columns,
how = "product",
output_col_name = "fCij",
prune_output = True
):
"""
        Combines multiple impedence values into a single impedence value. This is similar to a generalized cost.
        For example, if we have an impedence value for transit travel time and we also want to remove any trips based on a fare criterion, it can be applied in this way.
"""
if how == "product":
self.data[output_col_name] = 1
i = 0
while i < len(columns):
self.data[output_col_name] = self.data[output_col_name] * self.data[columns[i]]
i += 1
elif how == "sum":
self.data[output_col_name] = 0
i = 0
while i < len(columns):
self.data[output_col_name] = self.data[output_col_name] + self.data[columns[i]]
i += 1
else:
raise Exception('the input @how must be one of "product" or "sum"')
def max_impedence(self,
columns,
imp_col_name = "fCij"
):
"""
Reduces the cost table to only include rows with the maximum impedence value for the set of input columns.
        For example, if there are 3 transit trips from i to j, each with a different generalized cost resulting from a different route choice, this function returns the row with the greatest impedence value (i.e. the lowest generalized cost)
"""
self.data = self.data.groupby(columns)[imp_col_name].max().reset_index()
class supply:
def __init__(self,
supply_df,
columns = None
):
"""
        Initializing can include pruning the dataset to a list of @column names
"""
if columns is not None:
self.data = supply_df[columns]
else:
self.data = supply_df
def weight(self,
columns,
weights,
weight_col_name = "Oj",
prune_output = True
):
"""
        Creates a value based on a weighted linear combination of other values. Can be used to weight destinations by their desirability.
Parameters
----------------
columns : columns in which to input into the weights function
        weights : linear multipliers, the same length as columns
weight_col_name : output column name
prune_output : if True, delete all input columns used in the weight function
"""
if len(columns) != len(weights):
raise Exception("Please make sure columns and weights are lists of the same length")
if len(columns) < 2:
raise Exception("Can only weight opportunities if 2 or more are inputted")
if sum(weights) < 0.999 or sum(weights) > 1.001:
print("WARNING: the inputted weights do not sum to 1.")
self.data[weight_col_name] = 0
i = 0
while i < len(columns):
self.data[weight_col_name] = self.data[weight_col_name] + weights[i] * self.data[columns[i]]
i += 1
if prune_output is True:
for col in list(set(columns)):
del self.data[col]
class demand:
def __init__(self,
demand_df,
columns = None
):
"""
        Initializing can include pruning the dataset to a list of @column names
"""
if columns is not None:
self.data = demand_df[columns]
else:
self.data = demand_df
def weight(self,
columns,
weights,
weight_col_name = "Pi",
prune_output = True
):
"""
        Creates a value based on a weighted linear combination of other values. Can be used to weight population groups by their propensity to travel to certain activity types.
Parameters
----------------
columns : columns in which to input into the weights function
        weights : linear multipliers, the same length as columns
weight_col_name : output column name
prune_output : if True, delete all input columns used in the weight function
"""
if len(columns) != len(weights):
raise Exception("Please make sure columns and weights are lists of the same length")
if len(columns) < 2:
raise Exception("Can only weight opportunities if 2 or more are inputted")
if sum(weights) < 0.999 or sum(weights) > 1.001:
print("WARNING: the inputted weights do not sum to 1.")
self.data[weight_col_name] = 0
i = 0
while i < len(columns):
self.data[weight_col_name] = self.data[weight_col_name] + weights[i] * self.data[columns[i]]
i += 1
if prune_output is True:
for col in list(set(columns)):
del self.data[col]
class accessibility:
def __init__(self,
travelcosts_df,
supply_df,
demand_df = None,
travelcosts_ids = ["origin_id","destination_id"],
supply_ids = "destination_id",
demand_ids = None
):
"""
Parameters
----------
        travelcosts_df : a pandas dataframe containing travel costs from a set of locations (e.g. origins) to another set of locations (e.g. destinations). Data should be in a long table format:
origin_id | destination_id | travel_cost_1 | travel_cost_2 (optional) | etc (optional)
supply_df : a pandas dataframe containing the number of opportunities (e.g. supply), relational to the destination IDs in travelcosts_df
        demand_df : a pandas dataframe containing the number of agents competing for opportunities (e.g. demand), relational to the origin IDs in travelcosts_df. This is optional since several accessibility measures do not account for demand
travelcosts_ids : a two item list of the column names for the origin and destination IDs in the travelcosts_df table
supply_ids : a single variable string for the destination ID in the supply_df table
demand_ids : a single variable string for the origin ID in the demand_df table. This is optional since several accessibility measures do not account for demand
"""
self.travelcosts_ids = travelcosts_ids
self.supply_ids = supply_ids
self.demand_ids = demand_ids
if demand_df is None and supply_df is None:
raise Exception("Please input a supply_df or a demand_df")
# setting ID columns to strings to aid merging
travelcosts_df[travelcosts_ids[0]] = travelcosts_df[travelcosts_ids[0]].astype(str)
travelcosts_df[travelcosts_ids[1]] = travelcosts_df[travelcosts_ids[1]].astype(str)
# join supply data to the travel costs
if supply_df is not None and demand_df is None:
supply_df[supply_ids] = supply_df[supply_ids].astype(str)
self.data = pd.merge(
travelcosts_df,
supply_df,
left_on=travelcosts_ids[1],
right_on=self.supply_ids,
how = 'left'
)
# join demand data as well, if inputted
elif demand_df is not None and supply_df is None:
demand_df[demand_ids] = demand_df[demand_ids].astype(str)
self.data = pd.merge(
travelcosts_df,
demand_df,
left_on=travelcosts_ids[0],
right_on=self.demand_ids,
how = 'left'
)
else:
supply_df[supply_ids] = supply_df[supply_ids].astype(str)
demand_df[demand_ids] = demand_df[demand_ids].astype(str)
self.data = pd.merge(
travelcosts_df,
supply_df,
left_on=travelcosts_ids[1],
right_on=self.supply_ids,
how = 'left'
)
self.data = pd.merge(
self.data,
demand_df,
left_on=travelcosts_ids[0],
right_on=self.demand_ids,
how = 'left'
)
def potential(self, opportunity, impedence, output_col_name = None):
"""
Measures potential accessibility to destinations
Parameters
----------
opportunity : a string indicating the column name for which opportunity we are measuring access to (e.g. jobs, grocery stores, etc.). This column should be in the supply_df dataframe
impedence : column from the travel costs object to weight opportunities by
output_col_name : a string for the column name of the output accessibility measure
Output
----------
        A pandas dataframe whose first column contains the IDs of the origin points (self.travelcosts_ids[0]) and whose second column contains the accessibility measures based on the input parameters.
"""
# set the output name for the accessibility measure
if output_col_name is None:
A_col_name = "A_" + opportunity + "_" + impedence
else:
A_col_name = output_col_name
# multiply the opportunity by the impedence
self.data[A_col_name] = self.data[opportunity] * self.data[impedence]
# sum by the origin locations
Ai = self.data.groupby(self.travelcosts_ids[0])[[A_col_name]].sum().reset_index()
del self.data[A_col_name]
return Ai
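    # The measure returned above is the classic potential accessibility
    #   A_i = sum_j( O_j * f(C_ij) )
    # where O_j is the opportunity column and f(C_ij) is the impedence column
    # joined to each origin-destination pair.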
def passive(self, population, impedence, output_col_name = None):
"""
Measures passive accessibility to destinations
Parameters
----------
population : a string indicating the column name for which population we are measuring access to (e.g. overall population, employed population, etc.). This column should be in the demand_df dataframe
impedence : column from the travel costs object to weight opportunities by
output_col_name : a string for the column name of the output accessibility measure
Output
----------
        A pandas dataframe whose first column contains the IDs of the destination points (self.travelcosts_ids[1]) and whose second column contains the accessibility measures based on the input parameters.
"""
# set the output name for the accessibility measure
if output_col_name is None:
A_col_name = "A_" + population + "_" + impedence
else:
A_col_name = output_col_name
# multiply the opportunity by the impedence
self.data[A_col_name] = self.data[population] * self.data[impedence]
# sum by the origin locations
Ai = self.data.groupby(self.travelcosts_ids[1])[[A_col_name]].sum().reset_index()
del self.data[A_col_name]
return Ai
def mintravelcost(self, travelcost, opportunity, min_n, output_col_name = None):
"""
Parameters
----------
opportunity : a string indicating the column name for which opportunity we are measuring access to (e.g. jobs, grocery stores, etc.). This column should be in the supply_df dataframe
travelcost : a string indicating the column name for which travel cost shall be used (e.g. travel time, monetary cost, etc.). This column should be in the travelcosts_df dataframe
min_n : an int indicating the number of desired reachable opportunities (e.g. 1 library, 3 grocery stores, 10k jobs, etc.)
output_col_name : a string for the column name of the output accessibility measure
Output
---------
        A pandas dataframe whose first column contains the IDs of the origin points (self.travelcosts_ids[0]) and whose second column contains the accessibility measures based on the input parameters.
"""
# set the output name for the accessibility measure
if output_col_name is None:
A_col_name = "A_mintravelcost_" + str(travelcost) + "_" + str(opportunity) + "_" + str(min_n)
else:
A_col_name = output_col_name
# internal function of returning the min travel time for n opportunities
def get_min(df, tc, o, n):
df = df.sort_values(by=[tc], ascending=True)
df["cumsum"] = df[o].cumsum()
df = df[df["cumsum"] >= n]
return df[travelcost].min()
# generating the accessibility measure
out = pd.DataFrame(self.data.groupby(self.travelcosts_ids[0]).apply(get_min, tc = travelcost, o = opportunity, n = min_n))
# setting the column name of the output
out.columns = [A_col_name]
return out
class summary:
"""
Computing various summary statistics of accessibility, usually with respect to different population groups
Some of these can be used to assess distributions and equity of transport networks.
"""
def __init__(
self,
accessibility_df,
summary_vars,
accessibility_id = "id",
summary_vars_id = "id"
):
# join the data
self.data = pd.merge(
accessibility_df,
summary_vars,
left_on=accessibility_id,
right_on=summary_vars_id,
how = 'left'
)
def weighted_mean(self, access_var, group_var):
return tracc.statistics.weighted_mean(self.data, access_var, group_var)
def weighted_var(self, access_var, group_var):
return tracc.statistics.weighted_var(self.data, access_var, group_var)
def weighted_sd(self, access_var, group_var):
return tracc.statistics.weighted_sd(self.data, access_var, group_var)
def weighted_CV(self, access_var, group_var):
return tracc.statistics.weighted_CV(self.data, access_var, group_var)
def weighted_Gini(self, access_var, group_var):
return tracc.statistics.weighted_Gini(self.data, access_var, group_var)
def quantiles(self, access_var, group_vars, nbins = 10, result = "percent"):
# assign each observation a bin, based on nbins
dfq = pd.DataFrame( tracc.statistics.weighted_qcut(self.data[access_var], self.data[group_vars[0]], nbins))
# create a specific name for the quantile column
q_col_name = 'q' + str(nbins) + "_" + (group_vars[0])
dfq.columns = [q_col_name]
self.data = self.data.join(dfq, how='outer')
        # group by each bin, summarize
dfq = self.data.groupby([q_col_name])[group_vars].sum()
# return as counts or percent
if result == "count":
return dfq
elif result == "percent":
for var in group_vars:
dfq[var] = dfq[var] / dfq[var].sum()
return dfq
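# A minimal end-to-end sketch (not part of the original module; the toy data and
# column names below are hypothetical). It builds a tiny OD cost table, applies
# a simple 15-minute cumulative cutoff by hand instead of impedence_calc(), and
# computes potential accessibility to jobs.
if __name__ == "__main__":
    costs_df = pd.DataFrame({
        "origin_id": ["1", "1", "2", "2"],
        "destination_id": ["1", "2", "1", "2"],
        "time": [5, 20, 20, 5],
    })
    jobs_df = pd.DataFrame({"destination_id": ["1", "2"], "jobs": [100, 50]})
    tc = costs(costs_df)
    # cumulative cutoff: reachable within 15 minutes -> impedence of 1, else 0
    tc.data["fCij"] = (tc.data["time"] <= 15).astype(int)
    acc = accessibility(tc.data, jobs_df)
    # origin 1 reaches 100 jobs within the cutoff, origin 2 reaches 50
    print(acc.potential(opportunity="jobs", impedence="fCij"))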
|
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from rlgraph import get_backend
from rlgraph.components.memories.memory import Memory
from rlgraph.components.memories.fifo_queue import FIFOQueue
from rlgraph.components.memories.prioritized_replay import PrioritizedReplay
from rlgraph.components.memories.replay_memory import ReplayMemory
from rlgraph.components.memories.ring_buffer import RingBuffer
from rlgraph.components.memories.mem_prioritized_replay import MemPrioritizedReplay
# TODO backend reorg.
if get_backend() == "tf":
Memory.__lookup_classes__ = dict(
fifo=FIFOQueue,
fifoqueue=FIFOQueue,
prioritized=PrioritizedReplay,
prioritizedreplay=PrioritizedReplay,
prioritizedreplaybuffer=PrioritizedReplay,
mem_prioritized_replay=MemPrioritizedReplay,
replay=ReplayMemory,
replaybuffer=ReplayMemory,
replaymemory=ReplayMemory,
ringbuffer=RingBuffer
)
elif get_backend() == "pytorch":
Memory.__lookup_classes__ = dict(
prioritized=MemPrioritizedReplay,
prioritizedreplay=MemPrioritizedReplay,
prioritizedreplaybuffer=MemPrioritizedReplay,
mem_prioritized_replay=MemPrioritizedReplay,
replay=ReplayMemory,
replaybuffer=ReplayMemory,
replaymemory=ReplayMemory,
ringbuffer=RingBuffer
)
Memory.__default_constructor__ = ReplayMemory
__all__ = ["Memory", "PrioritizedReplay"] + \
list(set(map(lambda x: x.__name__, Memory.__lookup_classes__.values())))
|
import time
from http import HTTPStatus
from typing import Dict, List, Optional, Type
import pytest
from aioauth.storage import BaseStorage
from aioauth.config import Settings
from aioauth.models import Token
from aioauth.requests import Post, Request
from aioauth.server import AuthorizationServer
from aioauth.types import ErrorType, GrantType, RequestMethod
from aioauth.utils import (
catch_errors_and_unavailability,
encode_auth_headers,
generate_token,
)
from .models import Defaults
@pytest.mark.asyncio
async def test_internal_server_error():
class EndpointClass:
available: Optional[bool] = True
def __init__(self, available: Optional[bool] = None):
if available is not None:
self.available = available
@catch_errors_and_unavailability
async def server(self, request):
raise Exception()
e = EndpointClass()
response = await e.server(Request(method=RequestMethod.POST))
assert response.status_code == HTTPStatus.BAD_REQUEST
@pytest.mark.asyncio
async def test_invalid_token(server: AuthorizationServer, defaults: Defaults):
user_id = defaults.user_id
client_id = defaults.client_id
client_secret = defaults.client_secret
request_url = "https://localhost"
token = "invalid token"
post = Post(token=token)
request = Request(
user_id=user_id,
url=request_url,
post=post,
method=RequestMethod.POST,
headers=encode_auth_headers(client_id, client_secret),
)
response = await server.create_token_introspection_response(request)
assert not response.content["active"]
assert response.status_code == HTTPStatus.OK
@pytest.mark.asyncio
async def test_expired_token(
server: AuthorizationServer, storage: Dict[str, List], defaults: Defaults
):
user_id = defaults.user_id
client_id = defaults.client_id
client_secret = defaults.client_secret
settings = Settings(INSECURE_TRANSPORT=True)
token = Token(
user_id=user_id,
client_id=client_id,
expires_in=settings.TOKEN_EXPIRES_IN,
refresh_token_expires_in=settings.REFRESH_TOKEN_EXPIRES_IN,
access_token=generate_token(42),
refresh_token=generate_token(48),
issued_at=int(time.time() - settings.TOKEN_EXPIRES_IN),
scope=defaults.scope,
)
storage["tokens"].append(token)
post = Post(token=token.access_token)
request = Request(
user_id=user_id,
settings=settings,
post=post,
method=RequestMethod.POST,
headers=encode_auth_headers(client_id, client_secret),
)
response = await server.create_token_introspection_response(request)
assert response.status_code == HTTPStatus.OK
assert not response.content["active"]
@pytest.mark.asyncio
async def test_valid_token(
server: AuthorizationServer,
storage: Dict[str, List],
defaults: Defaults,
settings: Settings,
):
user_id = defaults.user_id
client_id = defaults.client_id
client_secret = defaults.client_secret
token = storage["tokens"][0]
post = Post(token=token.access_token)
request = Request(
user_id=user_id,
post=post,
method=RequestMethod.POST,
headers=encode_auth_headers(client_id, client_secret),
settings=settings,
)
response = await server.create_token_introspection_response(request)
assert response.status_code == HTTPStatus.OK
assert response.content["active"]
@pytest.mark.asyncio
async def test_introspect_revoked_token(
server: AuthorizationServer,
storage: Dict[str, List],
defaults: Defaults,
settings: Settings,
):
user_id = defaults.user_id
client_id = defaults.client_id
client_secret = defaults.client_secret
request_url = "https://localhost"
token = storage["tokens"][0]
post = Post(
grant_type=GrantType.TYPE_REFRESH_TOKEN,
refresh_token=token.refresh_token,
)
request = Request(
user_id=user_id,
settings=settings,
url=request_url,
post=post,
method=RequestMethod.POST,
headers=encode_auth_headers(client_id, client_secret),
)
response = await server.create_token_response(request)
assert response.status_code == HTTPStatus.OK
# Check that refreshed token was revoked
post = Post(token=token.access_token)
request = Request(
settings=settings,
post=post,
method=RequestMethod.POST,
headers=encode_auth_headers(client_id, client_secret),
)
response = await server.create_token_introspection_response(request)
assert not response.content["active"], "The refresh_token must be revoked"
@pytest.mark.asyncio
async def test_endpoint_availability(db_class: Type[BaseStorage]):
server = AuthorizationServer(storage=db_class())
request = Request(method=RequestMethod.POST, settings=Settings(AVAILABLE=False))
response = await server.create_token_introspection_response(request)
assert response.status_code == HTTPStatus.BAD_REQUEST
assert response.content["error"] == ErrorType.TEMPORARILY_UNAVAILABLE
|
#ABC051d
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
|
"""
Revision ID: 0256_set_postage_tmplt_hstr
Revises: 0255_another_letter_org
Create Date: 2019-02-05 14:51:30.808067
"""
from alembic import op
import sqlalchemy as sa
revision = '0256_set_postage_tmplt_hstr'
down_revision = '0255_another_letter_org'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute(
"""UPDATE templates_history SET postage = services.postage
FROM services WHERE template_type = 'letter' AND service_id = services.id"""
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("UPDATE templates_history SET postage = null WHERE template_type = 'letter'")
# ### end Alembic commands ###
|
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
import sys
# create pipeline - structured grid
#
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName("" + str(VTK_DATA_ROOT) + "/Data/combxyz.bin")
pl3d.SetQFileName("" + str(VTK_DATA_ROOT) + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
output = pl3d.GetOutput().GetBlock(0)
gf = vtk.vtkGeometryFilter()
gf.SetInputData(output)
gMapper = vtk.vtkPolyDataMapper()
gMapper.SetInputConnection(gf.GetOutputPort())
gMapper.SetScalarRange(output.GetScalarRange())
gActor = vtk.vtkActor()
gActor.SetMapper(gMapper)
gf2 = vtk.vtkGeometryFilter()
gf2.SetInputData(output)
gf2.ExtentClippingOn()
gf2.SetExtent(10,17,-6,6,23,37)
gf2.PointClippingOn()
gf2.SetPointMinimum(0)
gf2.SetPointMaximum(10000)
gf2.CellClippingOn()
gf2.SetCellMinimum(0)
gf2.SetCellMaximum(7500)
g2Mapper = vtk.vtkPolyDataMapper()
g2Mapper.SetInputConnection(gf2.GetOutputPort())
g2Mapper.SetScalarRange(output.GetScalarRange())
g2Actor = vtk.vtkActor()
g2Actor.SetMapper(g2Mapper)
g2Actor.AddPosition(0,15,0)
# create pipeline - poly data
#
gf3 = vtk.vtkGeometryFilter()
gf3.SetInputConnection(gf.GetOutputPort())
g3Mapper = vtk.vtkPolyDataMapper()
g3Mapper.SetInputConnection(gf3.GetOutputPort())
g3Mapper.SetScalarRange(output.GetScalarRange())
g3Actor = vtk.vtkActor()
g3Actor.SetMapper(g3Mapper)
g3Actor.AddPosition(0,0,15)
gf4 = vtk.vtkGeometryFilter()
gf4.SetInputConnection(gf2.GetOutputPort())
gf4.ExtentClippingOn()
gf4.SetExtent(10,17,-6,6,23,37)
gf4.PointClippingOn()
gf4.SetPointMinimum(0)
gf4.SetPointMaximum(10000)
gf4.CellClippingOn()
gf4.SetCellMinimum(0)
gf4.SetCellMaximum(7500)
g4Mapper = vtk.vtkPolyDataMapper()
g4Mapper.SetInputConnection(gf4.GetOutputPort())
g4Mapper.SetScalarRange(output.GetScalarRange())
g4Actor = vtk.vtkActor()
g4Actor.SetMapper(g4Mapper)
g4Actor.AddPosition(0,15,15)
# create pipeline - unstructured grid
#
s = vtk.vtkSphere()
s.SetCenter(output.GetCenter())
s.SetRadius(100.0)
#everything
eg = vtk.vtkExtractGeometry()
eg.SetInputData(output)
eg.SetImplicitFunction(s)
gf5 = vtk.vtkGeometryFilter()
gf5.SetInputConnection(eg.GetOutputPort())
g5Mapper = vtk.vtkPolyDataMapper()
g5Mapper.SetInputConnection(gf5.GetOutputPort())
g5Mapper.SetScalarRange(output.GetScalarRange())
g5Actor = vtk.vtkActor()
g5Actor.SetMapper(g5Mapper)
g5Actor.AddPosition(0,0,30)
gf6 = vtk.vtkGeometryFilter()
gf6.SetInputConnection(eg.GetOutputPort())
gf6.ExtentClippingOn()
gf6.SetExtent(10,17,-6,6,23,37)
gf6.PointClippingOn()
gf6.SetPointMinimum(0)
gf6.SetPointMaximum(10000)
gf6.CellClippingOn()
gf6.SetCellMinimum(0)
gf6.SetCellMaximum(7500)
g6Mapper = vtk.vtkPolyDataMapper()
g6Mapper.SetInputConnection(gf6.GetOutputPort())
g6Mapper.SetScalarRange(output.GetScalarRange())
g6Actor = vtk.vtkActor()
g6Actor.SetMapper(g6Mapper)
g6Actor.AddPosition(0,15,30)
# create pipeline - rectilinear grid
#
rgridReader = vtk.vtkRectilinearGridReader()
rgridReader.SetFileName(VTK_DATA_ROOT + "/Data/RectGrid2.vtk")
rgridReader.Update()
gf7 = vtk.vtkGeometryFilter()
gf7.SetInputConnection(rgridReader.GetOutputPort())
g7Mapper = vtk.vtkPolyDataMapper()
g7Mapper.SetInputConnection(gf7.GetOutputPort())
g7Mapper.SetScalarRange(rgridReader.GetOutput().GetScalarRange())
g7Actor = vtk.vtkActor()
g7Actor.SetMapper(g7Mapper)
g7Actor.SetScale(3,3,3)
gf8 = vtk.vtkGeometryFilter()
gf8.SetInputConnection(rgridReader.GetOutputPort())
gf8.ExtentClippingOn()
gf8.SetExtent(0,1,-2,2,0,4)
gf8.PointClippingOn()
gf8.SetPointMinimum(0)
gf8.SetPointMaximum(10000)
gf8.CellClippingOn()
gf8.SetCellMinimum(0)
gf8.SetCellMaximum(7500)
g8Mapper = vtk.vtkPolyDataMapper()
g8Mapper.SetInputConnection(gf8.GetOutputPort())
g8Mapper.SetScalarRange(rgridReader.GetOutput().GetScalarRange())
g8Actor = vtk.vtkActor()
g8Actor.SetMapper(g8Mapper)
g8Actor.SetScale(3,3,3)
g8Actor.AddPosition(0,15,0)
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.AddActor(gActor)
ren1.AddActor(g2Actor)
ren1.AddActor(g3Actor)
ren1.AddActor(g4Actor)
ren1.AddActor(g5Actor)
ren1.AddActor(g6Actor)
ren1.AddActor(g7Actor)
ren1.AddActor(g8Actor)
renWin.SetSize(340,550)
cam1 = ren1.GetActiveCamera()
cam1.SetClippingRange(84,174)
cam1.SetFocalPoint(5.22824,6.09412,35.9813)
cam1.SetPosition(100.052,62.875,102.818)
cam1.SetViewUp(-0.307455,-0.464269,0.830617)
iren.Initialize()
# test that the cell data is properly mapped in the output
ug = vtk.vtkUnstructuredGrid()
p = vtk.vtkPoints()
p.InsertNextPoint(0, 0, 0)
p.InsertNextPoint(1, 0, 0)
p.InsertNextPoint(2, 0, 0)
p.InsertNextPoint(3, 0, 0)
ug.SetPoints(p)
ug.GetNumberOfPoints()
ug.Allocate(4)
lpts = [0, 1]
ug.InsertNextCell(vtk.VTK_LINE, 2, lpts)
vpts = [1]
ug.InsertNextCell(vtk.VTK_VERTEX, 1, vpts)
lpts = [2, 3]
ug.InsertNextCell(vtk.VTK_LINE, 2, lpts)
vpts = [3]
ug.InsertNextCell(vtk.VTK_VERTEX, 1, vpts)
aa = vtk.vtkIntArray()
aa.InsertNextValue(0)
aa.InsertNextValue(1)
aa.InsertNextValue(2)
aa.InsertNextValue(3)
aa.SetName('testarray')
ug.GetCellData().AddArray(aa)
gf = vtk.vtkGeometryFilter()
gf.SetInputData(ug)
gf.Update()
pd = gf.GetOutput()
oa = pd.GetCellData().GetArray('testarray')
# Check that the ordering of polydata arrays is correct. Verts should come before
# lines.
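# The cells above were inserted as line (id 0), vertex (id 1), line (id 2),
# vertex (id 3), carrying cell-data values 0-3 in that order. Since verts are
# written to the output before lines, the expected cell-data sequence is the
# two vertices (1, 3) followed by the two lines (0, 2).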
correctcelldata = [1, 3, 0, 2]
if oa.GetValue(0) != correctcelldata[0] and oa.GetValue(0) != correctcelldata[1]:
print('Bad celldata of test array')
sys.exit(1)
if oa.GetValue(1) != correctcelldata[0] and oa.GetValue(1) != correctcelldata[1]:
print('Bad celldata of test array')
sys.exit(1)
if oa.GetValue(2) != correctcelldata[2] and oa.GetValue(2) != correctcelldata[3]:
print('Bad celldata of test array')
sys.exit(1)
if oa.GetValue(3) != correctcelldata[2] and oa.GetValue(3) != correctcelldata[3]:
print('Bad celldata of test array')
sys.exit(1)
# --- end of script ---
|
from __future__ import absolute_import, division, print_function
from scitbx import matrix
from dials.algorithms.refinement.parameterisation.scan_varying_model_parameters import (
ScanVaryingParameterSet,
ScanVaryingModelParameterisation,
GaussianSmoother,
)
from dials.algorithms.refinement.parameterisation.crystal_parameters import (
CrystalOrientationMixin,
CrystalUnitCellMixin,
)
from dials.algorithms.refinement.refinement_helpers import CrystalOrientationCompose
class ScanVaryingCrystalOrientationParameterisation(
ScanVaryingModelParameterisation, CrystalOrientationMixin
):
"""Scan-varying parameterisation for crystal orientation, with angles
expressed in mrad"""
def __init__(self, crystal, t_range, num_intervals, experiment_ids=None):
if experiment_ids is None:
experiment_ids = [0]
# The state of a scan varying crystal orientation parameterisation
# is an orientation
# matrix '[U](t)', expressed as a function of image number 't'
# in a sequential scan.
#
# The initial state is a snapshot of the crystal orientation
# at the point of initialisation '[U0]', which is independent of
# image number.
#
# Future states are composed by
# rotations around axes of the phi-axis frame by Tait-Bryan angles.
#
# [U](t) = [Phi3](t)[Phi2](t)[Phi1](t)[U0]
# Set up the smoother
smoother = GaussianSmoother(t_range, num_intervals)
nv = smoother.num_values()
# Set up the initial state
istate = matrix.sqr(crystal.get_U())
self._U_at_t = istate
# Factory function to provide to _build_p_list
def parameter_type(value, axis, ptype, name):
return ScanVaryingParameterSet(value, nv, axis, ptype, name)
# Build the parameter list
p_list = self._build_p_list(parameter_type)
# Set up the base class
ScanVaryingModelParameterisation.__init__(
self, crystal, istate, p_list, smoother, experiment_ids=experiment_ids
)
return
def compose(self, t):
"""calculate state and derivatives for model at image number t"""
# Extract orientation from the initial state
U0 = self._initial_state
# extract parameter sets from the internal list
phi1_set, phi2_set, phi3_set = self._param
# extract angles and other data at time t using the smoother
phi1, phi1_weights, phi1_sumweights = self._smoother.value_weight(t, phi1_set)
phi2, phi2_weights, phi2_sumweights = self._smoother.value_weight(t, phi2_set)
phi3, phi3_weights, phi3_sumweights = self._smoother.value_weight(t, phi3_set)
# calculate derivatives of angles wrt underlying parameters.
dphi1_dp = phi1_weights * (1.0 / phi1_sumweights)
dphi2_dp = phi2_weights * (1.0 / phi2_sumweights)
dphi3_dp = phi3_weights * (1.0 / phi3_sumweights)
# calculate state and derivatives using the helper class
coc = CrystalOrientationCompose(
U0, phi1, phi1_set.axis, phi2, phi2_set.axis, phi3, phi3_set.axis
)
self._U_at_t = coc.U()
dU_dphi1 = coc.dU_dphi1()
dU_dphi2 = coc.dU_dphi2()
dU_dphi3 = coc.dU_dphi3()
# calculate derivatives of state wrt underlying parameters
dU_dp1 = [None] * dphi1_dp.size
for (i, v) in dphi1_dp:
dU_dp1[i] = dU_dphi1 * v
dU_dp2 = [None] * dphi2_dp.size
for (i, v) in dphi2_dp:
dU_dp2[i] = dU_dphi2 * v
dU_dp3 = [None] * dphi3_dp.size
for (i, v) in dphi3_dp:
dU_dp3[i] = dU_dphi3 * v
# store derivatives as list-of-lists
self._dstate_dp = [dU_dp1, dU_dp2, dU_dp3]
return
def get_state(self):
"""Return crystal orientation matrix [U] at image number t"""
# only a single crystal is parameterised here, so no multi_state_elt
# argument is allowed
return self._U_at_t
class ScanVaryingCrystalUnitCellParameterisation(
ScanVaryingModelParameterisation, CrystalUnitCellMixin
):
"""Scan-varying parameterisation for the crystal unit cell"""
def __init__(
self,
crystal,
t_range,
num_intervals,
experiment_ids=None,
set_state_uncertainties=False,
):
self._set_state_uncertainties = set_state_uncertainties
if experiment_ids is None:
experiment_ids = [0]
# The state of a scan-varying unit cell parameterisation is the
# reciprocal space orthogonalisation matrix '[B](t)', expressed as a
# function of image number 't' in a sequential scan.
# Other comments from CrystalUnitCellParameterisation are relevant here
# Set up the smoother
smoother = GaussianSmoother(t_range, num_intervals)
nv = smoother.num_values()
# Set up the initial state
istate = None
self._B_at_t = matrix.sqr(crystal.get_B())
# Factory function to provide to _build_p_list
def parameter_type(value, name):
return ScanVaryingParameterSet(value, nv, name=name)
# Build the parameter list
p_list = self._build_p_list(crystal, parameter_type)
# Set up the base class
ScanVaryingModelParameterisation.__init__(
self, crystal, istate, p_list, smoother, experiment_ids=experiment_ids
)
return
def compose(self, t):
"""calculate state and derivatives for model at image number t"""
# extract values and weights at time t using the smoother
vals, weights, sumweights = zip(
*(self._smoother.value_weight(t, pset) for pset in self._param)
)
# calculate derivatives of metrical matrix parameters wrt underlying
# scan-varying parameters
inv_sumw = [1.0 / sw for sw in sumweights]
dvals_dp = [e * isw for e, isw in zip(weights, inv_sumw)]
# calculate new B and derivatives
self._B_at_t, dB_dval = self._compose_core(vals)
# calculate derivatives of state wrt underlying parameters
self._dstate_dp = [[None] * e.size for e in dvals_dp]
for i, (dv, dB) in enumerate(zip(dvals_dp, dB_dval)):
for j, e in dv:
self._dstate_dp[i][j] = e * dB
return
def get_state(self):
"""Return crystal orthogonalisation matrix [B] at image number t"""
# only a single crystal is parameterised here, so no multi_state_elt
# argument is allowed
return self._B_at_t
def set_state_uncertainties(self, var_cov_list):
"""Send the calculated variance-covariance of the elements of the B matrix
for all scan points back to the crystal model, if required
"""
if not self._set_state_uncertainties:
return
# Convert list of 9*9 matrices to a 3d array
from scitbx.array_family import flex
B_cov = flex.double(flex.grid(len(var_cov_list), 9, 9))
for i, v in enumerate(var_cov_list):
v = v.as_flex_double_matrix()
v.reshape(flex.grid(1, 9, 9))
B_cov[i : (i + 1), :, :] = v
# Pass it back to the model
self._model.set_B_covariance_at_scan_points(B_cov)
|
# ElectrumSV - lightweight BitcoinSV client
# Copyright (C) 2012 thomasv@gitorious
# Copyright (C) 2019-2020 The ElectrumSV Developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# source: http://stackoverflow.com/questions/2758159
import os
import re
import sys
import traceback
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtGui import QFont, QTextCursor, QTextOption
from electrumsv import util
from electrumsv.i18n import _
from electrumsv.logs import logs
from electrumsv.platform import platform
logger = logs.get_logger("console")
class OverlayLabel(QtWidgets.QLabel):
STYLESHEET = '''
QLabel, QLabel link {
color: rgb(0, 0, 0);
background-color: rgb(248, 240, 200);
border: 1px solid;
border-color: rgb(255, 114, 47);
padding: 2px;
}
'''
def __init__(self, text, parent):
super().__init__(text, parent)
self.setMinimumHeight(150)
self.setGeometry(0, 0, self.width(), self.height())
self.setStyleSheet(self.STYLESHEET)
self.setMargin(0)
parent.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setWordWrap(True)
def mousePressEvent(self, e):
self.hide()
def on_resize(self, w):
padding = 2 # px, from the stylesheet above
self.setFixedWidth(w - padding)
class Console(QtWidgets.QPlainTextEdit):
def __init__(self, prompt='>> ', startup_message='', parent=None):
QtWidgets.QPlainTextEdit.__init__(self, parent)
self.prompt = prompt
self.history = []
self.namespace = {}
self.construct = []
self.setGeometry(50, 75, 600, 400)
self.setWordWrapMode(QTextOption.WrapAnywhere)
self.setUndoRedoEnabled(False)
self.document().setDefaultFont(QFont(platform.monospace_font, 10, QFont.Normal))
self.showMessage(startup_message)
self.updateNamespace({'run': self.run_script})
self.set_json(False)
warning_text = "<h1><center>{}</center></h1><br>{}<br><br>{}<br><br>{}".format(
_("Warning!"),
_("Do not run code here that you don't understand. Running bad or malicious code "
"could lead to your coins being irreversibly lost."),
_("Text shown here is sent by the server and may be malicious; ignore anything it "
"might be asking you to do."),
_("Click here to hide this message.")
)
self.messageOverlay = OverlayLabel(warning_text, self)
def resizeEvent(self, e):
super().resizeEvent(e)
scrollbar_width = self.verticalScrollBar().width() * self.verticalScrollBar().isVisible()
self.messageOverlay.on_resize(self.width() - scrollbar_width)
def set_json(self, b):
self.is_json = b
def run_script(self, filename):
with open(filename) as f:
script = f.read()
# eval is generally considered bad practice. use it wisely!
# pylint: disable=eval-used
eval(script, self.namespace, self.namespace)
def updateNamespace(self, namespace):
self.namespace.update(namespace)
def showMessage(self, message):
self.appendPlainText(message)
self.newPrompt()
def clear(self):
self.setPlainText('')
self.newPrompt()
def newPrompt(self):
if self.construct:
prompt = '.' * len(self.prompt)
else:
prompt = self.prompt
self.completions_pos = self.textCursor().position()
self.completions_visible = False
self.appendPlainText(prompt)
self.moveCursor(QTextCursor.End)
def getCommand(self):
doc = self.document()
curr_line = doc.findBlockByLineNumber(doc.lineCount() - 1).text()
curr_line = curr_line.rstrip()
curr_line = curr_line[len(self.prompt):]
return curr_line
def setCommand(self, command):
if self.getCommand() == command:
return
doc = self.document()
curr_line = doc.findBlockByLineNumber(doc.lineCount() - 1).text()
self.moveCursor(QTextCursor.End)
for i in range(len(curr_line) - len(self.prompt)):
self.moveCursor(QTextCursor.Left, QTextCursor.KeepAnchor)
self.textCursor().removeSelectedText()
self.textCursor().insertText(command)
self.moveCursor(QTextCursor.End)
def show_completions(self, completions):
if self.completions_visible:
self.hide_completions()
c = self.textCursor()
c.setPosition(self.completions_pos)
completions = [x.split('.')[-1] for x in completions]
t = '\n' + ' '.join(completions)
if len(t) > 500:
t = t[:500] + '...'
c.insertText(t)
self.completions_end = c.position()
self.moveCursor(QTextCursor.End)
self.completions_visible = True
def hide_completions(self):
if not self.completions_visible:
return
c = self.textCursor()
c.setPosition(self.completions_pos)
for x in range(self.completions_end - self.completions_pos):
c.deleteChar()
self.moveCursor(QTextCursor.End)
self.completions_visible = False
def getConstruct(self, command):
if self.construct:
prev_command = self.construct[-1]
self.construct.append(command)
if not prev_command and not command:
ret_val = '\n'.join(self.construct)
self.construct = []
return ret_val
else:
return ''
else:
            if command and command[-1] == ':':
self.construct.append(command)
return ''
else:
return command
def getHistory(self):
return self.history
    def setHistory(self, history):
self.history = history
def addToHistory(self, command):
if command[0:1] == ' ':
return
if command and (not self.history or self.history[-1] != command):
self.history.append(command)
self.history_index = len(self.history)
def getPrevHistoryEntry(self):
if self.history:
self.history_index = max(0, self.history_index - 1)
return self.history[self.history_index]
return ''
def getNextHistoryEntry(self):
if self.history:
hist_len = len(self.history)
self.history_index = min(hist_len, self.history_index + 1)
if self.history_index < hist_len:
return self.history[self.history_index]
return ''
def getCursorPosition(self):
c = self.textCursor()
return c.position() - c.block().position() - len(self.prompt)
def setCursorPosition(self, position):
self.moveCursor(QTextCursor.StartOfLine)
for i in range(len(self.prompt) + position):
self.moveCursor(QTextCursor.Right)
def register_command(self, c, func):
methods = {c: func}
self.updateNamespace(methods)
def runCommand(self):
command = self.getCommand()
self.addToHistory(command)
command = self.getConstruct(command)
if command:
tmp_stdout = sys.stdout
class stdoutProxy():
def __init__(self, write_func):
self.write_func = write_func
self.skip = False
def flush(self):
pass
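                # Note: print() normally issues two write() calls per line (the
                # text, then the trailing newline); toggling 'skip' in write()
                # drops every second call so appendPlainText, which adds its own
                # newline, does not emit blank lines between outputs.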
def write(self, text):
if not self.skip:
stripped_text = text.rstrip('\n')
self.write_func(stripped_text)
QtCore.QCoreApplication.processEvents()
self.skip = not self.skip
sys.stdout = stdoutProxy(self.appendPlainText)
try:
try:
# eval is generally considered bad practice. use it wisely!
# pylint: disable=eval-used
result = eval(command, self.namespace, self.namespace)
if result is not None:
if self.is_json:
print(util.json_encode(result))
else:
self.appendPlainText(repr(result))
except SyntaxError:
# exec is generally considered bad practice. use it wisely!
# pylint: disable=exec-used
exec(command, self.namespace, self.namespace)
except SystemExit:
self.close()
except Exception:
# Catch errors in the network layer as well, as long as it uses Exception.
traceback_lines = traceback.format_exc().split('\n')
# Remove traceback mentioning this file, and a linebreak
for i in (3, 2, 1, -1):
traceback_lines.pop(i)
self.appendPlainText('\n'.join(traceback_lines))
sys.stdout = tmp_stdout
self.newPrompt()
self.set_json(False)
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Tab:
self.completions()
return
self.hide_completions()
if event.key() in (QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return):
self.runCommand()
return
if event.key() == QtCore.Qt.Key_Home:
self.setCursorPosition(0)
return
if event.key() == QtCore.Qt.Key_PageUp:
return
elif event.key() in (QtCore.Qt.Key_Left, QtCore.Qt.Key_Backspace):
if self.getCursorPosition() == 0:
return
elif event.key() == QtCore.Qt.Key_Up:
self.setCommand(self.getPrevHistoryEntry())
return
elif event.key() == QtCore.Qt.Key_Down:
self.setCommand(self.getNextHistoryEntry())
return
elif event.key() == QtCore.Qt.Key_L and event.modifiers() == QtCore.Qt.ControlModifier:
self.clear()
super(Console, self).keyPressEvent(event)
def completions(self):
cmd = self.getCommand()
lastword = re.split(r' |\(|\)', cmd)[-1]
beginning = cmd[0: -len(lastword)]
path = lastword.split('.')
prefix = '.'.join(path[:-1])
prefix = (prefix + '.') if prefix else prefix
ns = self.namespace.keys()
if len(path) > 1:
obj = self.namespace.get(path[0])
try:
for attr in path[1:-1]:
obj = getattr(obj, attr)
except AttributeError:
ns = []
else:
ns = dir(obj)
completions = []
for name in ns:
if name[0] == '_':
continue
if name.startswith(path[-1]):
completions.append(prefix + name)
completions.sort()
if not completions:
self.hide_completions()
elif len(completions) == 1:
self.hide_completions()
self.setCommand(beginning + completions[0])
else:
# find common prefix
p = os.path.commonprefix(completions)
if len(p) > len(lastword):
self.hide_completions()
self.setCommand(beginning + p)
else:
self.show_completions(completions)
welcome_message = '''
---------------------------------------------------------------
Welcome to a primitive Python interpreter.
---------------------------------------------------------------
'''
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
console = Console(startup_message=welcome_message)
console.updateNamespace({'myVar1': app, 'myVar2': 1234})
console.show()
sys.exit(app.exec_())
|
import pygame
import os
import holder as ch
import time
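# Pair every image in texture2D/ with its atlas of the same base name in
# textAsset/ (expected as <name>.atlas.txt): report any missing atlas,
# otherwise cut the sprite sheet via holder.body_cut(name).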
texture2D = os.listdir('texture2D')
textAsset = os.listdir('textAsset')
text_ = []
for text in textAsset:
text_.append(text.split('.'))
textAsset = []
for text in text_:
textAsset.append(text[0])
textAsset = set(textAsset)
text_ = []
for text in texture2D:
text_.append(text.split('.'))
texture2D = []
for text in text_:
texture2D.append(text[0])
for name in texture2D:
if name not in textAsset:
        print("Atlas file missing; please add [" + name + ".atlas.txt] to the TextAsset folder")
else:
ch.body_cut(name)
        print('Finished one: ' + name)
print("Done. The window will close in 15 s.")
time.sleep(15)
|
# -*- coding: utf-8 -*-
from pyramid_oereb.standard.xtf_import.util import parse_string, parse_multilingual_text, parse_ref
class PublicLawRestriction(object):
TAG_INFORMATION = 'Aussage'
TAG_SUB_THEME = 'SubThema'
TAG_OTHER_THEME = 'WeiteresThema'
TAG_TYPE_CODE = 'ArtCode'
TAG_TYPE_CODE_LIST = 'ArtCodeliste'
TAG_LAW_STATUS = 'Rechtsstatus'
TAG_PUBLISHED_FROM = 'publiziertAb'
TAG_VIEW_SERVICE = 'DarstellungsDienst'
TAG_RESPONSIBLE_OFFICE = 'ZustaendigeStelle'
def __init__(self, session, model, topic_code):
self._session = session
self._model = model
self._topic_code = topic_code
def parse(self, public_law_restriction): # pragma: no cover
instance = self._model(
id=public_law_restriction.attrib['TID'],
information=parse_multilingual_text(public_law_restriction, self.TAG_INFORMATION),
topic=self._topic_code,
sub_theme=parse_string(public_law_restriction, self.TAG_SUB_THEME),
other_theme=parse_string(public_law_restriction, self.TAG_OTHER_THEME),
type_code=parse_string(public_law_restriction, self.TAG_TYPE_CODE),
type_code_list=parse_string(public_law_restriction, self.TAG_TYPE_CODE_LIST),
law_status=parse_string(public_law_restriction, self.TAG_LAW_STATUS),
published_from=parse_string(public_law_restriction, self.TAG_PUBLISHED_FROM),
view_service_id=parse_ref(public_law_restriction, self.TAG_VIEW_SERVICE),
office_id=parse_ref(public_law_restriction, self.TAG_RESPONSIBLE_OFFICE)
)
self._session.add(instance)
|
# Created by Andrzej Lach @ 2021
# https://github.com/AndrzejLach89
from aqa.math import *
from varmain.primitiv import *
from varmain.custom import *
import math
@activate(Group="Support", Ports=1, TooltipShort="Support - insulated, anchor", TooltipLong="Support - insulated, anchor", LengthUnit="mm")
@group("MainDimensions")
@param(D=LENGTH, TooltipShort="Pipe diameter")
@param(H=LENGTH, TooltipShort="Height", Ask4Dist=True)
@param(CL=LENGTH, TooltipShort="Clamp length")
@param(CT=LENGTH, TooltipShort="Clamp thickness")
@param(CW=LENGTH, TooltipShort="Clamp width")
@param(CO=LENGTH, TooltipShort="Clamp offset")
@param(W=LENGTH, TooltipShort="Bottom plate width")
@param(L=LENGTH, TooltipShort="Bottom plate length")
@param(T=LENGTH, TooltipShort="Plate thickness")
@param(NUT=LENGTH, TooltipShort="Nut size (Mxx)")
@param(PA=LENGTH, TooltipShort="Front/back plate width")
@param(PT=LENGTH, TooltipShort="Front/back plate thickness")
@param(LT=LENGTH, TooltipShort="Total length")
def SUP_INS_ANCH(s, D=114.3, H=192, CL=50, CT=8, W=100, L=200, T=11, CW=226, CO=10, NUT=16, PA=60, PT=8, LT=230, ID='SUP_INS_ANCH', **kw):
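    # Hex-nut lookup keyed by metric thread size (Mxx). From the values below,
    # 'h' is the nut height, 'd' the width across flats, and 'x' = d / sqrt(3),
    # i.e. the hexagon side length used when the nut body is assembled from
    # three boxes rotated by 0/60/120 degrees further down.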
nutSizes = {
8: {'h': 6.500, 'd': 13.000, 'x': 7.5056},
12: {'h': 10.000, 'd': 18.000, 'x': 10.3923},
16: {'h': 13.000, 'd': 24.000, 'x': 13.8564},
20: {'h': 16.000, 'd': 30.000, 'x': 17.3205},
24: {'h': 19.000, 'd': 36.000, 'x': 20.7846}
}
if NUT not in nutSizes:
NUT = min(nutSizes, key=lambda x:abs(x-NUT))
nutType = nutSizes[NUT]
    if D <= 0 or H <= 0 or CL <= 0 or CT <= 0 or T <= 0 or PA <= 0 or PT <= 0:
return
if LT < L + 2*PT:
LT = L + 2*PT
if W < T:
W = T
body = BOX(s, L=T, W=H-D/2-T/2, H=L).translate((0, 0, (H-D/2-T/2)/2-H))
hPlate = BOX(s, L=W, W=T, H=L).translate((0, 0, T/2 - H))
body.uniteWith(hPlate)
hPlate.erase()
endPlateTranslations = ((L/2+PT/2, 0, -H/2+T/4), (-(L/2+PT/2), 0, -H/2+T/4))
endPlates = []
for i in endPlateTranslations:
endPlates.append(BOX(s,L=PA, W=H, H=PT).translate(i))
for i in endPlates:
endPlateCut = CYLINDER(s, R=D/2 + CT, H=LT, O=0).rotateY(90).translate((-LT/2, 0, 0))
i.subtractFrom(endPlateCut)
endPlateCut.erase()
for i in endPlates:
body.uniteWith(i)
i.erase()
endPlates.clear()
clamps = []
cnt = 0
clampOffset = ((CL/2 - LT/2, 0, 0), (-CL/2 + LT/2, 0, 0))
# bolts
nutHeight = nutType['h']
nutLength = nutType['d']
nutWidth = nutType['x']
cutRadius = math.sqrt(math.pow(nutHeight, 2) + math.pow(nutLength/2, 2))
for off in clampOffset:
clamps.append(CYLINDER(s, R=D/2+CT, H=CL, O=D/2).rotateY(90).translate((-CL/2, 0, 0)))
clampH = BOX(s, L=CW, W=2*CT+CO, H=CL)
vPlateCut = CYLINDER(s, R=D/2, H=CL, O=0).rotateY(90).translate((-CL/2, 0, 0))
clampH.subtractFrom(vPlateCut)
clamps[cnt].uniteWith(clampH)
if CO > 0:
clampCut = BOX(s, L=CW, W=CO, H=CL)
clamps[cnt].subtractFrom(clampCut)
clamps[cnt].translate(clampOffset[cnt])
mainOffsets = ((0, CW/2-(CW/2 - D/2 - CT)/2, 0), (0, -CW/2+(CW/2 - D/2 - CT)/2, 0))
boltH = 2*nutHeight + 2*CT + CO + 5
boltR = NUT/2
boltOffset = (0, 0, -boltH + CO/2 + CT + nutHeight)
bolts = []
nut1offset = (0, 0, nutHeight/2 + CO/2 + CT)
nut2offset = (0, 0, -nutHeight/2 - CO/2 - CT)
nutOffsets = (nut1offset, nut2offset)
for x in mainOffsets:
bolt = CYLINDER(s, R=boltR, H=boltH, O=0).translate(boltOffset)
boltHole = CYLINDER(s, R=boltR+0.5, H=boltH, O=0).translate(boltOffset)
nutParts = []
nc = 0
for i in nutOffsets:
nutParts.append(BOX(s, L=nutWidth, W=nutHeight, H=nutLength).translate(i))
p1 = BOX(s, L=nutWidth, W=nutHeight, H=nutLength).rotateZ(60).translate(i)
p2 = BOX(s, L=nutWidth, W=nutHeight, H=nutLength).rotateZ(120).translate(i)
#nutParts.append(BOX(s, L=nutWidth, W=nutHeight, H=nutLength).rotateZ(60).translate(i))
#nutParts.append(BOX(s, L=nutWidth, W=nutHeight, H=nutLength).rotateZ(120).translate(i))
c1 = HALFSPHERE(s, R=cutRadius).translate(i).translate((0, 0, -nutHeight/2))
nutParts[nc].uniteWith(p1)
nutParts[nc].uniteWith(p2)
nutParts[nc].intersectWith(c1)
p1.erase()
p2.erase()
c1.erase()
if nc == 1:
c2 = HALFSPHERE(s, R=cutRadius).rotateX(180).translate(i).translate((0, 0, nutHeight/2))
nutParts[nc].intersectWith(c2)
c2.erase()
nc += 1
for i in nutParts:
bolt.uniteWith(i)
bolt.translate(x)
boltHole.translate(x)
bolt.translate(clampOffset[cnt])
boltHole.translate(clampOffset[cnt])
clamps[cnt].subtractFrom(boltHole)
clamps[cnt].uniteWith(bolt)
bolt.erase()
boltHole.erase()
body.uniteWith(clamps[cnt])
cnt += 1
clamps.clear()
s.setPoint((0.000, 0.000, 0.000), (1.000, 0.000, 0.000))
s.setLinearDimension('H',(0, 0, 0), (0, 0, -H))
|
# -*- coding: utf-8 -*-
__version__ = '20.9.1.dev0'
PROJECT_NAME = "galaxy-util"
PROJECT_OWNER = PROJECT_USERNAME = "galaxyproject"
PROJECT_URL = "https://github.com/galaxyproject/galaxy"
PROJECT_AUTHOR = 'Galaxy Project and Community'
PROJECT_DESCRIPTION = 'Galaxy Generic Utilities'
PROJECT_EMAIL = 'galaxy-committers@lists.galaxyproject.org'
RAW_CONTENT_URL = "https://raw.github.com/%s/%s/master/" % (
    PROJECT_USERNAME, PROJECT_NAME
)
|
'''
Training a trivial parametric monomial function "wx" (with no bias parameter)
to approximate the true hypothesis f(x)= 2x given 3 datapoints of observation (1, 2),(2, 4),(3, 6)
This learner has no practical usage (hence, its name).
We are using non-stochastic gradient descent and running weight updates for 30 epochs.
A rudimentary squared difference is used for the loss function:
From data, we get:
L(w) = (2-w)(2-w) + (4-2w)(4-2w) + (6-3w)(6-3w)
= (4 - 4w + w^2) + (16 - 16w + 4w^2) + (36 - 36w + 9w^2)
L(w) = 56 - 56w + 14w^2
L'(w) = -56 + 28w
Solving this analytically gives us w = 2
But for the sake of this exercise, we apply gradient descent with w starting at 0 <=> w_t = 0 for t=0 where t is epoch
w_t+1 = w_t - learning_rate * L'(w_t)
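For illustration, starting from w_0 = 0 with learning_rate = 0.01:
w_1 = 0 - 0.01 * L'(0) = 0 - 0.01 * (-56) = 0.56
w_2 = 0.56 - 0.01 * L'(0.56) = 0.56 - 0.01 * (-40.32) = 0.9632
so successive updates move w monotonically towards the analytic optimum w = 2.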
Training effectively overfits the data as the setup is completely hypothetical (e.g. there is no test data)
The point of the exercise is solely to get familiar with operating Google TensorFlow framework
'''
import tensorflow as tf
# Parameters
n_epoch = 30
n_features = 1
n_examples = 3
n_outputs = 1
learning_rate = .01
# Fetch the data
def fetch():
return {xx:[[1],[2],[3]], yy:[[2],[4],[6]]}
# Define the model
# Model inputs & outputs definitions
xx = tf.placeholder(tf.float32, shape=(n_examples, n_features), name = "MyInputs")
yy = tf.placeholder(tf.float32, shape=(n_examples, n_outputs), name = "MyLabels")
# Model hypothesis
ww = tf.Variable(tf.zeros(dtype=tf.float32, shape=(n_features, 1)), name = "MyWeights", trainable=True)
predict_yy = tf.matmul(xx, ww)
# Evaluate the loss
loss = tf.reduce_sum(tf.squared_difference(predict_yy, yy), name = "MyLoss")
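# With the three observations above this expands to exactly the docstring's
# L(w) = 56 - 56w + 14w^2, since predict_yy - yy = (w-2, 2w-4, 3w-6).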
# Train the model / Apply gradient updates (One Step)
# Calculate gradient of the loss for each weight
# + Update each weight
opt = tf.train.GradientDescentOptimizer(learning_rate= learning_rate)
minimizer = opt.minimize(loss, var_list=[ww])
# Evaluate the model against the test data. Test the model
def eval(inputs):
return tf.matmul(inputs, ww)
# Init variables
init = tf.initialize_all_variables()
tf.scalar_summary("Loss", tf.reduce_mean(loss))
tf.scalar_summary("Weight", tf.reduce_mean(ww))
merged = tf.merge_all_summaries()
def main():
    print("Running %s" % __file__)
#tf.is_variable_initialized(ww)
with tf.Session() as sess:
# Create a summary writer, add the 'graph' to the event file.
writer = tf.train.SummaryWriter(".", sess.graph)
init.run()
for epoch in range(n_epoch):
            summaries, _, loss_value, _ = sess.run([merged, minimizer, loss, ww], feed_dict=fetch())
            print('epoch {:d}: loss is {:f}'.format(epoch, loss_value))
writer.add_summary(summaries, epoch)
# eval(test_data)
if __name__ == '__main__': main()
|