import pytest
from utils.phone_numbers import fix_number_formatting, validate_phone_number
@pytest.mark.parametrize(
"number, expected_result",
[
("7446123456", "07446123456"), # Test number with missing 0
("07446123456", "07446123456"), # Test number no spaces
("07446 123456", "07446123456"), # Test number with spaces
("+447446123456", "+447446123456"), # Test international number no spaces
("+447446 123456", "+447446123456"), # Test international number with spaces
("+4407446123456", "+447446123456"), # Test international number with a 0
("+44 07446 123456", "+447446123456"), # Test international number with a 0 and spaces
],
)
def test_fix_number_formatting(number, expected_result):
result = fix_number_formatting(number)
assert expected_result == result
@pytest.mark.parametrize(
"number, expected_result",
[
("07446123456", True), # Test number is valid
("074461234567", False), # Test number is too long
("+447446123456", True), # Test international number is valid
("+4407446123456", False), # Test international number contains 0
("+4474461234567", False), # Test international number is too long 0
]
)
def test_validate_phone_number(number, expected_result):
result = validate_phone_number(number)
assert expected_result == result
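# --- Hypothetical reference sketch (NOT the real utils.phone_numbers module) ---
# A minimal implementation that would satisfy the parametrized cases above,
# assuming UK-style numbers: strip spaces, prepend the missing leading 0 to
# bare national numbers, and drop the redundant 0 after a +44 country code.
import re

def _sketch_fix_number_formatting(number: str) -> str:
    number = number.replace(" ", "")
    if number.startswith("+44"):
        rest = number[3:]
        if rest.startswith("0"):
            rest = rest[1:]  # "+44 0..." -> "+44..."
        return "+44" + rest
    if not number.startswith("0"):
        return "0" + number  # bare national number missing its leading 0
    return number

def _sketch_validate_phone_number(number: str) -> bool:
    # valid: 11 digits starting with 0, or +44 followed by exactly 10 digits
    # that do not start with 0
    return bool(
        re.fullmatch(r"0\d{10}", number)
        or re.fullmatch(r"\+44[1-9]\d{9}", number)
    )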
|
"""
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Disnake Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import collections
import collections.abc
import inspect
import sys
import traceback
from typing import Any, Callable, List, TYPE_CHECKING, Optional, TypeVar, Type, Union
import disnake
from .core import GroupMixin
from .view import StringView
from .context import Context
from . import errors
from .help import HelpCommand, DefaultHelpCommand
from .common_bot_base import CommonBotBase
if TYPE_CHECKING:
from typing_extensions import ParamSpec
from disnake.message import Message
from disnake.interactions import ApplicationCommandInteraction
from ._types import (
Check,
CoroFunc,
)
ApplicationCommandInteractionT = TypeVar(
"ApplicationCommandInteractionT", bound=ApplicationCommandInteraction, covariant=True
)
AnyMessageCommandInter = Any # Union[ApplicationCommandInteraction, MessageCommandInteraction]
AnyUserCommandInter = Any # Union[ApplicationCommandInteraction, UserCommandInteraction]
P = ParamSpec("P")
__all__ = (
"when_mentioned",
"when_mentioned_or",
"BotBase",
)
MISSING: Any = disnake.utils.MISSING
T = TypeVar("T")
CFT = TypeVar("CFT", bound="CoroFunc")
CXT = TypeVar("CXT", bound="Context")
def when_mentioned(bot: BotBase, msg: Message) -> List[str]:
"""A callable that implements a command prefix equivalent to being mentioned.
These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.
"""
# bot.user will never be None when this is called
return [f"<@{bot.user.id}> ", f"<@!{bot.user.id}> "] # type: ignore
def when_mentioned_or(*prefixes: str) -> Callable[[BotBase, Message], List[str]]:
"""A callable that implements when mentioned or other prefixes provided.
These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.
Example
--------
.. code-block:: python3
bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'))
.. note::
This callable returns another callable, so if this is done inside a custom
callable, you must call the returned callable, for example:
.. code-block:: python3
async def get_prefix(bot, message):
extras = await prefixes_for(message.guild) # returns a list
return commands.when_mentioned_or(*extras)(bot, message)
See Also
----------
:func:`.when_mentioned`
"""
def inner(bot, msg):
r = list(prefixes)
r = when_mentioned(bot, msg) + r
return r
return inner
def _is_submodule(parent: str, child: str) -> bool:
return parent == child or child.startswith(parent + ".")
class _DefaultRepr:
def __repr__(self):
return "<default-help-command>"
_default: Any = _DefaultRepr()
class BotBase(CommonBotBase, GroupMixin):
def __init__(
self,
command_prefix: Optional[Union[str, List[str], Callable]] = None,
help_command: HelpCommand = _default,
description: Optional[str] = None,
**options: Any,
):
super().__init__(**options)
self.command_prefix = command_prefix
self._checks: List[Check] = []
self._check_once = []
self._before_invoke = None
self._after_invoke = None
self._help_command = None
self.description: str = inspect.cleandoc(description) if description else ""
self.strip_after_prefix: bool = options.get("strip_after_prefix", False)
if help_command is _default:
self.help_command = DefaultHelpCommand()
else:
self.help_command = help_command
# internal helpers
async def on_command_error(self, context: Context, exception: errors.CommandError) -> None:
"""|coro|
The default command error handler provided by the bot.
By default this prints to :data:`sys.stderr` however it could be
overridden to have a different implementation.
This only fires if you do not specify any listeners for command error.
"""
if self.extra_events.get("on_command_error", None):
return
command = context.command
if command and command.has_error_handler():
return
cog = context.cog
if cog and cog.has_error_handler():
return
print(f"Ignoring exception in command {context.command}:", file=sys.stderr)
traceback.print_exception(
type(exception), exception, exception.__traceback__, file=sys.stderr
)
# global check registration
def add_check(
self,
func: Check,
*,
call_once: bool = False,
) -> None:
"""Adds a global check to the bot.
This is the non-decorator interface to :meth:`.check`,
:meth:`.check_once`, :meth:`.slash_command_check`, etc.
If none of the boolean parameters are specified, the check applies
to text commands only.
Parameters
-----------
func
The function that was used as a global check.
call_once: :class:`bool`
If the function should only be called once per
:meth:`.invoke` call.
"""
if call_once:
self._check_once.append(func)
else:
self._checks.append(func)
def remove_check(
self,
func: Check,
*,
call_once: bool = False,
) -> None:
"""Removes a global check from the bot.
This function is idempotent and will not raise an exception
if the function is not in the global checks.
If none of the boolean parameters are specified, the check applies
to text commands only.
Parameters
-----------
func
The function to remove from the global checks.
call_once: :class:`bool`
If the function was added with ``call_once=True`` in
the :meth:`.Bot.add_check` call or using :meth:`.check_once`.
"""
l = self._check_once if call_once else self._checks
try:
l.remove(func)
except ValueError:
pass
def check(self, func: T) -> T:
r"""A decorator that adds a global check to the bot.
A global check is similar to a :func:`.check` that is applied
on a per command basis except it is run before any command checks
have been verified and applies to every command the bot has.
.. note::
This function can either be a regular function or a coroutine.
Similar to a command :func:`.check`\, this takes a single parameter
of type :class:`.Context` and can only raise exceptions inherited from
:exc:`.CommandError`.
Example
---------
.. code-block:: python3
@bot.check
def check_commands(ctx):
return ctx.command.qualified_name in allowed_commands
"""
# T was used instead of Check to ensure the type matches on return
self.add_check(func) # type: ignore
return func
def check_once(self, func: CFT) -> CFT:
r"""A decorator that adds a "call once" global check to the bot.
Unlike regular global checks, this one is called only once
per :meth:`.invoke` call.
Regular global checks are called whenever a command is called
or :meth:`.Command.can_run` is called. This type of check
bypasses that and ensures that it's called only once, even inside
the default help command.
.. note::
When using this function the :class:`.Context` sent to a group subcommand
may only parse the parent command and not the subcommands due to it
being invoked once per :meth:`.Bot.invoke` call.
.. note::
This function can either be a regular function or a coroutine.
Similar to a command :func:`.check`\, this takes a single parameter
of type :class:`.Context` and can only raise exceptions inherited from
:exc:`.CommandError`.
Example
---------
.. code-block:: python3
@bot.check_once
def whitelist(ctx):
return ctx.message.author.id in my_whitelist
"""
self.add_check(func, call_once=True)
return func
async def can_run(self, ctx: Context, *, call_once: bool = False) -> bool:
data = self._check_once if call_once else self._checks
if len(data) == 0:
return True
# type-checker doesn't distinguish between functions and methods
return await disnake.utils.async_all(f(ctx) for f in data) # type: ignore
def before_invoke(self, coro: CFT) -> CFT:
"""A decorator that registers a coroutine as a pre-invoke hook.
A pre-invoke hook is called directly before the command is
called. This makes it a useful function to set up database
connections or any type of set up required.
This pre-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
The :meth:`~.Bot.before_invoke` and :meth:`~.Bot.after_invoke` hooks are
only called if all checks and argument parsing procedures pass
without error. If any check or argument parsing procedures fail
then the hooks are not called.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the pre-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The pre-invoke hook must be a coroutine.")
self._before_invoke = coro
return coro
def after_invoke(self, coro: CFT) -> CFT:
r"""A decorator that registers a coroutine as a post-invoke hook.
A post-invoke hook is called directly after the command is
called. This makes it a useful function to clean-up database
connections or any type of clean up required.
This post-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
Similar to :meth:`~.Bot.before_invoke`\, this is not called unless
checks and argument parsing procedures succeed. This hook is,
however, **always** called regardless of the internal command
callback raising an error (i.e. :exc:`.CommandInvokeError`\).
This makes it ideal for clean-up scenarios.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the post-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError("The post-invoke hook must be a coroutine.")
self._after_invoke = coro
return coro
# extensions
def _remove_module_references(self, name: str) -> None:
super()._remove_module_references(name)
# remove all the commands from the module
for cmd in self.all_commands.copy().values():
if cmd.module is not None and _is_submodule(name, cmd.module):
if isinstance(cmd, GroupMixin):
cmd.recursively_remove_all_commands()
self.remove_command(cmd.name)
# help command stuff
@property
def help_command(self) -> Optional[HelpCommand]:
return self._help_command
@help_command.setter
def help_command(self, value: Optional[HelpCommand]) -> None:
if value is not None:
if not isinstance(value, HelpCommand):
raise TypeError("help_command must be a subclass of HelpCommand")
if self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = value
value._add_to_bot(self)
elif self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = None
else:
self._help_command = None
# command processing
async def get_prefix(self, message: Message) -> Optional[Union[List[str], str]]:
"""|coro|
Retrieves the prefix the bot is listening to
with the message as a context.
Parameters
-----------
message: :class:`disnake.Message`
The message context to get the prefix of.
Returns
--------
Optional[Union[List[:class:`str`], :class:`str`]]
A list of prefixes or a single prefix that the bot is
listening for. None if the bot isn't listening for prefixes.
"""
prefix = ret = self.command_prefix
if callable(prefix):
ret = await disnake.utils.maybe_coroutine(prefix, self, message)
if ret is None:
return None
if not isinstance(ret, str):
try:
ret = list(ret) # type: ignore
except TypeError:
# It's possible that a generator raised this exception. Don't
# replace it with our own error if that's the case.
if isinstance(ret, collections.abc.Iterable):
raise
raise TypeError(
"command_prefix must be plain string, iterable of strings, or callable "
f"returning either of these, not {ret.__class__.__name__}"
)
if not ret:
raise ValueError("Iterable command_prefix must contain at least one prefix")
return ret
async def get_context(self, message: Message, *, cls: Type[CXT] = Context) -> CXT:
r"""|coro|
Returns the invocation context from the message.
This is a more low-level counter-part for :meth:`.process_commands`
to allow users more fine grained control over the processing.
The returned context is not guaranteed to be a valid invocation
context, :attr:`.Context.valid` must be checked to make sure it is.
If the context is not valid then it is not a valid candidate to be
invoked under :meth:`~.Bot.invoke`.
Parameters
-----------
message: :class:`disnake.Message`
The message to get the invocation context from.
cls
The factory class that will be used to create the context.
By default, this is :class:`.Context`. Should a custom
class be provided, it must be similar enough to :class:`.Context`\'s
interface.
Returns
--------
:class:`.Context`
The invocation context. The type of this can change via the
``cls`` parameter.
"""
view = StringView(message.content)
ctx = cls(prefix=None, view=view, bot=self, message=message)
if message.author.id == self.user.id: # type: ignore
return ctx
prefix = await self.get_prefix(message)
invoked_prefix = prefix
if prefix is None:
return ctx
elif isinstance(prefix, str):
if not view.skip_string(prefix):
return ctx
else:
try:
# if the context class' __init__ consumes something from the view this
# will be wrong. That seems unreasonable though.
if message.content.startswith(tuple(prefix)):
invoked_prefix = disnake.utils.find(view.skip_string, prefix)
else:
return ctx
except TypeError:
if not isinstance(prefix, list):
raise TypeError(
"get_prefix must return either a string or a list of string, "
f"not {prefix.__class__.__name__}"
)
# It's possible a bad command_prefix got us here.
for value in prefix:
if not isinstance(value, str):
raise TypeError(
"Iterable command_prefix or list returned from get_prefix must "
f"contain only strings, not {value.__class__.__name__}"
)
# Getting here shouldn't happen
raise
if self.strip_after_prefix:
view.skip_ws()
invoker = view.get_word()
ctx.invoked_with = invoker
# type-checker fails to narrow invoked_prefix type.
ctx.prefix = invoked_prefix # type: ignore
ctx.command = self.all_commands.get(invoker)
return ctx
async def invoke(self, ctx: Context) -> None:
"""|coro|
Invokes the command given under the invocation context and
handles all the internal event dispatch mechanisms.
Parameters
-----------
ctx: :class:`.Context`
The invocation context to invoke.
"""
if ctx.command is not None:
self.dispatch("command", ctx)
try:
if await self.can_run(ctx, call_once=True):
await ctx.command.invoke(ctx)
else:
raise errors.CheckFailure("The global check once functions failed.")
except errors.CommandError as exc:
await ctx.command.dispatch_error(ctx, exc)
else:
self.dispatch("command_completion", ctx)
elif ctx.invoked_with:
exc = errors.CommandNotFound(f'Command "{ctx.invoked_with}" is not found')
self.dispatch("command_error", ctx, exc)
async def process_commands(self, message: Message) -> None:
"""|coro|
This function processes the commands that have been registered
to the bot and other groups. Without this coroutine, none of the
commands will be triggered.
By default, this coroutine is called inside the :func:`.on_message`
event. If you choose to override the :func:`.on_message` event, then
you should invoke this coroutine as well.
This is built using other low level tools, and is equivalent to a
call to :meth:`~.Bot.get_context` followed by a call to :meth:`~.Bot.invoke`.
This also checks if the message's author is a bot and doesn't
call :meth:`~.Bot.get_context` or :meth:`~.Bot.invoke` if so.
Parameters
-----------
message: :class:`disnake.Message`
The message to process commands for.
"""
if message.author.bot:
return
ctx = await self.get_context(message)
await self.invoke(ctx)
async def on_message(self, message):
await self.process_commands(message)
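# --- Usage sketch (illustrative only; ``commands.Bot`` and ``TOKEN`` are
# assumptions, not part of this module) ---
#
#   bot = commands.Bot(command_prefix="!")
#
#   @bot.event
#   async def on_message(message):
#       ...  # custom per-message handling
#       # overriding on_message replaces the default handler defined above,
#       # so process_commands must be called explicitly for prefix commands
#       # to keep working
#       await bot.process_commands(message)
#
#   bot.run(TOKEN)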
|
#!/usr/bin/env python3
import sys
import argparse
from requests import get
from transip_rest_client import TransipRestClient
def getOptions(args=sys.argv[1:]):
parser = argparse.ArgumentParser(description="DynDNS: Updates a DNS record for a dynamic IP address.")
parser.add_argument("-u", "--user", help="Your username.", required=True)
parser.add_argument("-k", "--key", help="Key file containing RSA private key.", required=True)
parser.add_argument("-n", "--name", help="Name of the record (e.g. 'www').", required=True)
parser.add_argument("-d", "--domain", help="Existing DNS domain (e.g. 'example.com').", required=True)
parser.add_argument("-v", "--verbose", action='store_true', help="Verbose mode.")
options = parser.parse_args(args)
return options
def find(arr, id):
for x in arr:
if x["name"] == id:
return x
def main(key, username, domain, name, verbose):
with open(key, 'r') as f:
my_RSA_key = f.read()
if "BEGIN RSA PRIVATE KEY" not in my_RSA_key:
print("Key in incorrect format, convert the key with the following command:")
print("openssl rsa -in privatekey.txt -out rsaprivatekey.txt")
return
newIp = get('https://api.ipify.org').text
if verbose:
print(f"Retrieved IP from api.ipify.org: {newIp}")
client = TransipRestClient(user=username, rsaprivate_key=my_RSA_key, global_key=True)
entries = client.get_dns_entries(domain=domain)
if verbose:
print(f"Found {len(entries)} DNS entries")
entry = find(entries, name)
if entry is None:
print(f"No ip found, adding {newIp}")
client.post_dns_entry(domain=domain, name=name, expire=300, record_type='A', content=newIp)
else:
oldIp = entry["content"]
if verbose:
print(f"Found current IP in DNS entry: {oldIp}")
if oldIp != newIp:
print(f"Updating {oldIp} to {newIp}")
client.patch_dns_entry(domain=domain, name=name, record_type='A', content=newIp)
else:
print(f"Not updating {oldIp}")
if __name__ == "__main__":
options = getOptions()
if options.verbose:
print("Verbose output enabled.")
main(options.key, options.user, options.domain, options.name, options.verbose)
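# Example invocation (hypothetical values; assumes a TransIP account and an
# RSA-format private key converted as described above):
#
#   python dyndns.py --user myuser --key ~/.transip/rsaprivatekey.txt \
#       --name www --domain example.com --verbose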
|
import unittest
from tempfile import mkdtemp
from shutil import rmtree
class WidgetTestCase(unittest.TestCase):
def setUp(self):
from kivy.uix.widget import Widget
self.cls = Widget
self.root = Widget()
def test_add_remove_widget(self):
root = self.root
self.assertEqual(root.children, [])
c1 = self.cls()
root.add_widget(c1)
self.assertEqual(root.children, [c1])
root.remove_widget(c1)
self.assertEqual(root.children, [])
def test_invalid_add_widget(self):
from kivy.uix.widget import WidgetException
try:
# None of them should work
self.root.add_widget(None)
self.root.add_widget(WidgetException)
self.root.add_widget(self.cls)
self.fail()
except WidgetException:
pass
def test_clear_widgets(self):
root = self.root
self.assertEqual(root.children, [])
c1 = self.cls()
c2 = self.cls()
c3 = self.cls()
root.add_widget(c1, index=0)
root.add_widget(c2, index=1)
root.add_widget(c3, index=2)
self.assertEqual(root.children, [c1, c2, c3])
root.clear_widgets([c2])
self.assertEqual(root.children, [c1, c3])
root.clear_widgets([])
self.assertEqual(root.children, [c1, c3])
root.clear_widgets()
self.assertEqual(root.children, [])
def test_clear_widgets_children(self):
root = self.root
for _ in range(10):
root.add_widget(self.cls())
self.assertEqual(len(root.children), 10)
root.clear_widgets(root.children)
self.assertEqual(root.children, [])
def test_position(self):
wid = self.root
wid.x = 50
self.assertEqual(wid.x, 50)
self.assertEqual(wid.pos, [50, 0])
wid.y = 60
self.assertEqual(wid.y, 60)
self.assertEqual(wid.pos, [50, 60])
wid.pos = (0, 0)
self.assertEqual(wid.pos, [0, 0])
self.assertEqual(wid.x, 0)
self.assertEqual(wid.y, 0)
def test_size(self):
wid = self.root
wid.width = 50
self.assertEqual(wid.width, 50)
self.assertEqual(wid.size, [50, 100])
wid.height = 60
self.assertEqual(wid.height, 60)
self.assertEqual(wid.size, [50, 60])
wid.size = (100, 100)
self.assertEqual(wid.size, [100, 100])
self.assertEqual(wid.width, 100)
self.assertEqual(wid.height, 100)
def test_collision(self):
wid = self.root
self.assertEqual(wid.pos, [0, 0])
self.assertEqual(wid.size, [100, 100])
self.assertEqual(wid.collide_point(-1, -1), False)
self.assertEqual(wid.collide_point(0, 0), True)
self.assertEqual(wid.collide_point(50, 50), True)
self.assertEqual(wid.collide_point(100, 100), True)
self.assertEqual(wid.collide_point(200, 0), False)
self.assertEqual(wid.collide_point(500, 500), False)
# Currently fails in the test suite with a "Shader didn't link" error, but works when run alone.
@unittest.skip("Doesn't work with the test suite, but works alone")
def test_export_to_png(self):
from kivy.core.image import Image as CoreImage
from kivy.uix.button import Button
from os.path import join
wid = Button(text='test', size=(200, 100), size_hint=(None, None))
self.root.add_widget(wid)
tmp = mkdtemp()
wid.export_to_png(join(tmp, 'a.png'))
wid.export_to_png(join(tmp, 'b.png'), scale=.5)
wid.export_to_png(join(tmp, 'c.png'), scale=2)
self.assertEqual(CoreImage(join(tmp, 'a.png')).size, (200, 100))
self.assertEqual(CoreImage(join(tmp, 'b.png')).size, (100, 50))
self.assertEqual(CoreImage(join(tmp, 'c.png')).size, (400, 200))
rmtree(tmp)
self.root.remove_widget(wid)
def test_disabled(self):
from kivy.uix.widget import Widget
w = Widget(disabled=None)
w.disabled = False
w.disabled = True
self.assertEqual(w.disabled, True)
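# Allow running this file directly with the standard unittest runner
# (assumes a working Kivy installation and window provider).
if __name__ == '__main__':
    unittest.main()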
|
import time
import sys
import random
import datetime
import rq
import rq.job
import rq.compat
import rq.worker
from rq.defaults import (DEFAULT_LOGGING_FORMAT, DEFAULT_LOGGING_DATE_FORMAT)
class NonForkWorker(rq.Worker):
def __init__(self, *args, **kwargs):
if kwargs.get('default_worker_ttl', None) is None:
kwargs['default_worker_ttl'] = 2
super(NonForkWorker, self).__init__(*args, **kwargs)
def work(self, burst=False, logging_level="INFO", date_format=DEFAULT_LOGGING_DATE_FORMAT,
log_format=DEFAULT_LOGGING_FORMAT, max_jobs=None, with_scheduler=False):
self.default_worker_ttl = 2
return super(NonForkWorker, self).work(
burst=burst,
logging_level=logging_level,
date_format=date_format,
log_format=log_format,
max_jobs=max_jobs,
with_scheduler=with_scheduler
)
def execute_job(self, job, queue):
self.main_work_horse(job, queue)
def main_work_horse(self, job, queue):
random.seed()
self._is_horse = True
success = self.perform_job(job, queue)
self._is_horse = False
def perform_job(self, job, queue, heartbeat_ttl=None):
self.prepare_job_execution(job)
self.procline('Processing %s from %s since %s' % (
job.func_name,
job.origin, time.time()))
try:
job.started_at = datetime.datetime.now()
# NOTE: unlike the stock Worker, the job time limit is deliberately not enforced here.
rv = job.perform()
# Pickle the result in the same try-except block since we need to
# use the same exc handling when pickling fails
job._result = rv
job._status = rq.job.JobStatus.FINISHED
job.ended_at = datetime.datetime.now()
with self.connection.pipeline() as pipeline:
pipeline.watch(job.dependents_key)
queue.enqueue_dependents(job, pipeline=pipeline)
self.set_current_job_id(None, pipeline=pipeline)
self.increment_successful_job_count(pipeline=pipeline)
result_ttl = job.get_result_ttl(self.default_result_ttl)
if result_ttl != 0:
job.save(pipeline=pipeline, include_meta=False)
job.cleanup(result_ttl, pipeline=pipeline,
remove_from_queue=False)
pipeline.execute()
except:
# Use the public setter here, to immediately update Redis
job.status = rq.job.JobStatus.FAILED
self.handle_exception(job, *sys.exc_info())
return False
if rv is None:
self.log.info('Job OK')
else:
self.log.info('Job OK, result = %s' % (rq.worker.yellow(rq.compat.text_type(rv)),))
if result_ttl == 0:
self.log.info('Result discarded immediately.')
elif result_ttl > 0:
self.log.info('Result is kept for %d seconds.' % result_ttl)
else:
self.log.warning('Result will never expire, clean up result key manually.')
return True
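# --- Usage sketch (assumptions: a local Redis instance and an importable job
# function; the names below are illustrative, not part of this module) ---
if __name__ == "__main__":
    from redis import Redis
    from rq import Queue

    connection = Redis()  # assumes redis://localhost:6379
    queue = Queue("default", connection=connection)
    # queue.enqueue(my_module.my_job)  # hypothetical job function
    worker = NonForkWorker([queue], connection=connection)
    # burst=True drains the queue and exits; jobs run in this process (no fork)
    worker.work(burst=True)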
|
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright © 2011, 2012, 2013, 2014, 2015 OnlineGroups.net and
# Contributors.
#
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
############################################################################
import codecs
import os
from setuptools import setup, find_packages
from version import get_version
version = get_version()
with codecs.open('README.rst', encoding='utf-8') as f:
long_description = f.read()
with codecs.open(os.path.join("docs", "HISTORY.rst"),
encoding='utf-8') as f:
long_description += '\n' + f.read()
setup(
name='gs.site.change.name',
version=version,
description="Change the name of a GroupServer site",
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
"Environment :: Web Environment",
"Framework :: Zope2",
"Intended Audience :: Developers",
'License :: OSI Approved :: Zope Public License',
"Natural Language :: English",
"Natural Language :: French",
"Natural Language :: German",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='site, groupserver, name, configure, admin',
author='Michael JasonSmith',
author_email='mpj17@onlinegroups.net',
url='https://source.iopen.net/groupserver/gs.site.change.name/',
license='ZPL 2.1',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['gs', 'gs.site', 'gs.site.change', ],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'zope.formlib',
'zope.browserpage',
'zope.i18n[compile]',
'zope.i18nmessageid',
'zope.interface',
'zope.schema',
'zope.tal',
'zope.tales',
'zope.viewlet',
'Zope2',
'gs.content.form.base',
'gs.content.layout',
'gs.help',
'gs.site.change.base',
'Products.GSContent',
],
entry_points="""
# -*- Entry points: -*-
""",)
|
"""\
C++ code generator
@copyright: 2002-2007 Alberto Griggio
@copyright: 2012-2016 Carsten Grohmann
@copyright: 2017-2020 Dietmar Schwertberger
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import os.path, re, logging
from codegen import BaseLangCodeWriter, BaseSourceFileContent, _replace_tag
from codegen import ClassLines as BaseClassLines
import config, wcodegen
class SourceFileContent(BaseSourceFileContent):
"""Keeps info about an existing file that has to be updated, to replace only the lines inside a wxGlade block,
and to keep the rest of the file as it was.
@ivar event_handlers: dictionary of event handlers for each class
@ivar header_content: Content of the header file
@ivar source_content: Content of the source file"""
rec_block_start = re.compile(
r'^(?P<spaces>\s*)' # leading spaces
r'//\s*' # comment sign
r'begin\s+wxGlade:\s*' # "begin wxGlade:" statement and trailing spaces
r'(?P<classname>\w*)' # class or function name
r'::' # separator between class and function / block (non-greedy)
r'(?P<block>\w+)' # function / block name
r'\s*$' # trailing spaces
)
rec_block_end = re.compile(
r'^\s*' # leading spaces
r'//\s*' # comment sign
r'end\s+wxGlade' # "end wxGlade" statement
r'\s*$' # trailing spaces
)
rec_class_end = re.compile(
r'^\s*};\s*' # closing curly brackets
r'//\s*' # comment sign
r'wxGlade:\s+end\s+class' # "wxGlade: end class" statement
r'\s*$' # trailing spaces
)
"Regexp to match last line of a class statement"
rec_class_decl = re.compile(
r'^\s*' # leading spaces
r'class\s+([a-zA-Z_]\w*)' # "class <name>" statement
r'\s*' # trailing spaces
)
"""Regexp to match class declarations
This isn't very accurate - doesn't match template classes, nor virtual
inheritance, but should be enough for most cases"""
rec_decl_event_table = re.compile(
r'^\s*' # leading spaces
r'DECLARE_EVENT_TABLE\s*\(\s*\)\s*;?' # declaration of the event table
r'\s*$' # trailing spaces
)
"Regexp to match declaration of event table"
rec_def_event_table = re.compile(
r'^\s*' # leading spaces
r'BEGIN_EVENT_TABLE\s*\(\s*(\w+)\s*,\s*(\w+)\s*\)'
r'\s*$' # trailing spaces
)
"Regexp to match event table"
rec_event_handler = re.compile(
r'^\s*' # leading spaces
r'(?:virtual\s+)?'
r'void\s+(?P<handler>[A-Za-z_]+\w*)' # event handler name
r'\s*' # optional spaces
r'\([A-Za-z_:0-9]+\s*&\s*\w*\)\s*;'
r'\s*' # optional spaces
r'//\s*wxGlade:\s*<event_handler>' # wxGlade event handler statement
r'\s*$' # trailing spaces
)
rec_event_handlers_marker = re.compile(
r'^\s*' # leading spaces
r'//\s*wxGlade:\s*add\s+'
r'((?:\w|:)+)\s+event handlers'
r'\s*$' # trailing spaces
)
"Regexp to match wxGlade comment of event handlers"
def __init__(self, name, code_writer):
# initialise new variables first
self.header_content = None
#self.source_content = None
self.content = None
self.event_table_decl = {}
self.event_table_def = {}
self.header_extension = code_writer.header_extension
self.source_extension = code_writer.source_extension
# call inherited constructor
BaseSourceFileContent.__init__(self, name, code_writer)
def replace_header(self, tag, content):
return _replace_tag(self.header_content, tag, content)
def build_untouched_content(self):
BaseSourceFileContent.build_untouched_content(self)
self._build_untouched(self.name + "." + self.header_extension, True)
BaseSourceFileContent.build_untouched_content(self)
self._build_untouched(self.name + "." + self.source_extension, False)
def _build_untouched(self, filename, is_header):
prev_was_handler = False
events_tag_added = False
inside_block = False
inside_comment = False
tmp_in = self._load_file(filename)
out_lines = []
check_old_methods = [] # list of indices with set_properties or do_layout
for line in tmp_in:
comment_index = line.find('/*')
if not inside_comment and comment_index != -1 and comment_index > line.find('//'):
inside_comment = True
if inside_comment:
end_index = line.find('*/')
if end_index > comment_index:
inside_comment = False
if not is_header:
result = None
else:
result = self.rec_class_decl.match(line)
if not inside_comment and not inside_block and result:
if not self.class_name:
# this is the first class declared in the file: insert the new ones before this
out_lines.append( '<%swxGlade insert new_classes>' % self.nonce )
self.new_classes_inserted = True
self.class_name = result.group(1)
self.class_name = self.format_classname(self.class_name)
self.classes.add( self.class_name ) # add the found class to the list of classes of this module
out_lines.append(line)
elif not inside_block:
result = self.rec_block_start.match(line)
if not inside_comment and result:
# replace the lines inside a wxGlade block with a tag that will be used later by add_class
spaces = result.group('spaces')
which_class = result.group('classname')
which_block = result.group('block')
if not which_class:
which_class = self.class_name
else:
which_class = self.format_classname(which_class)
self.spaces[which_class] = spaces
inside_block = True
if which_block in ("do_layout","set_properties"):
# probably to be removed
check_old_methods.append( len(out_lines) )
out_lines.append( '<%swxGlade replace %s %s>' %
(self.nonce, result.group('classname'), result.group('block') ) )
else:
dont_append = False
# ALB 2004-12-08 event handling support...
if is_header and not inside_comment:
result = self.rec_event_handler.match(line)
if result:
prev_was_handler = True
which_handler = result.group('handler')
which_class = self.class_name
self.event_handlers.setdefault( which_class, set() ).add( which_handler )
else:
if prev_was_handler:
# add extra event handlers here...
out_lines.append('<%swxGlade event_handlers %s>' % (self.nonce, self.class_name) )
prev_was_handler = False
events_tag_added = True
elif not events_tag_added and \
self.is_end_of_class(line):
out_lines.append( '<%swxGlade event_handlers %s>' % (self.nonce, self.class_name) )
# now try to see if we already have a DECLARE_EVENT_TABLE
result = self.rec_decl_event_table.match(line)
if result:
self.event_table_decl[self.class_name] = True
elif not inside_comment:
result = self.rec_event_handlers_marker.match(line)
if result:
out_lines.append( '<%swxGlade add %s event handlers>' % (self.nonce, result.group(1)) )
dont_append = True
result = self.rec_def_event_table.match(line)
if result:
which_class = result.group(1)
self.event_table_def[which_class] = True
# ----------------------------------------
if not dont_append:
out_lines.append(line)
else:
# ignore all the lines inside a wxGlade block
if self.rec_block_end.match(line):
inside_block = False
if is_header and not self.new_classes_inserted:
# if we are here, the previous ``version'' of the file did not contain any class, so we must add the
# new_classes tag at the end of the file
out_lines.append('<%swxGlade insert new_classes>' % self.nonce)
# when moving from 0.9 to 1.0: remove empty methods "do_layout" and "set_properties"
while check_old_methods:
i = check_old_methods.pop(-1)
if out_lines[i+1].strip()=='}': # just end of block -> remove incl. trailing empty lines
self._remove_method(out_lines, i-2, i+1)
# set the ``persistent'' content of the file
if is_header:
self.header_content = out_lines
else:
self.content = out_lines
def is_end_of_class(self, line):
"""Returns True if the line is the last line of a class
Not really, but for wxglade-generated code it should work..."""
return self.rec_class_end.match(line)
class ClassLines(BaseClassLines):
"""Stores the lines of C++ code for a custom class"""
def __init__(self):
BaseClassLines.__init__(self)
self.ids = [] # Ids declared in the source (for Evt. handling): grouped in a public enum in the custom class
self.sub_objs = [] # List of 2-tuples (type, name) of the sub-objects; attributes of the toplevel object
self.extra_code_h = [] # Extra header code to output
self.extra_code_cpp = [] # Extra source code to output
self.dependencies = set()
class CPPCodeWriter(BaseLangCodeWriter, wcodegen.CppMixin):
"""Code writer class for writing C++ code out of the designed GUI elements
source_extension: Extension of the source file
header_extension: Extension of the header file
last_generated_id: Last generated Id number (wxNewId() is not used yet)
tmpl_init_gettext: Template for inclusion of i18n headers and defining APP_CATALOG constant or None
see: BaseLangCodeWriter"""
ClassLines = ClassLines
_code_statements = {
'backgroundcolour': "%(objname)sSetBackgroundColour(%(value)s);\n",
'disabled': "%(objname)sEnable(0);\n",
'extraproperties': "%(objname)sSet%(propname_cap)s(%(value)s);\n",
'focused': "%(objname)sSetFocus();\n",
'foregroundcolour': "%(objname)sSetForegroundColour(%(value)s);\n",
'hidden': "%(objname)sHide();\n",
'setfont': "%(objname)sSetFont(wxFont(%(size)s, %(family)s, "
"%(style)s, %(weight)s, %(underlined)s, wxT(%(face)s)));\n",
'tooltip': "%(objname)sSetToolTip(%(tooltip)s);\n",
'wxcolour': "wxColour(%(value)s)",
'wxnullcolour': "wxNullColour",
'wxsystemcolour': "wxSystemSettings::GetColour(%(value)s)",
}
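# Illustrative expansion of the statement templates above (assumption:
# %(objname)s already includes the access prefix, e.g. "button_1->" for a
# member widget or "" for the object itself):
#
#   'disabled' with objname="button_1->"  =>  button_1->Enable(0);
#   'hidden'   with objname=""            =>  Hide();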
class_separator = '::'
language_note = \
'// Example for compiling a single file project under Linux using g++:\n' \
'// g++ MyApp.cpp $(wx-config --libs) $(wx-config --cxxflags) -o MyApp\n' \
'//\n' \
'// Example for compiling a multi file project under Linux using g++:\n' \
'// g++ main.cpp $(wx-config --libs) $(wx-config --cxxflags) -o MyApp Dialog1.cpp Frame1.cpp\n' \
'//\n'
output_name = None # If not None, name (without extension) of the file to write into
output_header = None # Temporary storage of header file for writing into (list)
output_file = None # Temporary storage of source file for writing into (list)
shebang = '// -*- C++ -*-\n//\n'
tmpl_cfunc_end = '}\n\n'
tmpl_sizeritem = '%s->Add(%s, %s, %s, %s);\n'
tmpl_sizeritem_button = '%s->AddButton(%s);\n'
tmpl_gridbagsizeritem = '%s->Add(%s, wxGBPosition%s, wxGBSpan%s, %s, %s);\n'
tmpl_gridbagsizerspacer = '%s->Add(%s, %s, wxGBPosition%s, wxGBSpan%s, %s, %s);\n'
tmpl_spacersize = '%s, %s'
tmpl_appfile = """\
%(overwrite)s\
%(header_lines)s\
#include "%(filename_top_win_class)s"
"""
tmpl_init_gettext = """\
#include <wx/intl.h>
#ifndef APP_CATALOG
#define APP_CATALOG "%(textdomain)s" // replace with the appropriate catalog name
#endif
"""
def _get_app_template(self, app, top_win):
'build template string for application'
if not self.app_name: return None
# XXX use Show() for frames/panels and ShowModal()/Destroy for dialogs
klass = app.klass
if self._use_gettext:
gettext1 = ["protected:", "%(tab)swxLocale m_locale; // locale we'll be using"]
gettext2 = ['%(tab)sm_locale.Init();',
'#ifdef APP_LOCALE_DIR',
'%(tab)sm_locale.AddCatalogLookupPathPrefix(wxT(APP_LOCALE_DIR));',
'#endif',
'%(tab)sm_locale.AddCatalog(wxT(APP_CATALOG));\n']
else:
gettext1 = gettext2 = []
if klass:
klass1 = 'class %(klass)s: public wxApp {'
klass2 = ['IMPLEMENT_APP(%(klass)s)\n',
'bool %(klass)s::OnInit()']
else:
klass1 = 'class MyApp: public wxApp {'
klass2 = ['IMPLEMENT_APP(MyApp)\n',
'bool MyApp::OnInit()',]
ret = ['', klass1,
'public:', '%(tab)sbool OnInit();'
] + gettext1 + ['};\n'] + klass2 + ['{'] + gettext2 + [
'%(tab)swxInitAllImageHandlers();',
'%(tab)s%(top_win_class)s* %(top_win)s = new %(top_win_class)s(NULL, wxID_ANY, wxEmptyString);',
'%(tab)sSetTopWindow(%(top_win)s);',
'%(tab)s%(top_win)s->Show();',
'%(tab)sreturn true;',
'}', '']
return '\n'.join(ret)
tmpl_empty_string = 'wxEmptyString'
def init_lang(self, app=None):
self.last_generated_id = 1000
self.generated_ids = {}
# Extensions and main filename based on Project options when set
if app is not None:
self.source_extension = app.source_extension or config.default_source_extension
self.header_extension = app.header_extension or config.default_header_extension
else:
self.source_extension = config.default_source_extension
self.header_extension = config.default_header_extension
if hasattr(app, "app_filename"): # only for testing
base = os.path.splitext(app.app_filename)[0]
else:
base = os.path.splitext(config.default_cpp_app_name)[0] #
self.app_filename = '%s.%s' % (base, self.source_extension)
self.header_lines = [ '#include <wx/wx.h>\n',
'#include <wx/image.h>\n' ]
# include i18n / gettext
if self._use_gettext and self._textdomain:
self.header_lines.append( self.tmpl_init_gettext % {'textdomain': self._textdomain} )
# extra lines to generate (see the 'extracode' property of top-level widgets)
self._current_extra_code_h = []
self._current_extra_code_cpp = []
def init_files(self, out_path):
if self.multiple_files:
self.previous_source = None
self.out_dir = out_path
else:
name = os.path.splitext(out_path)[0]
self.output_name = name
if not self._overwrite:
header_exists = self._file_exists(name + "." + self.header_extension)
source_exists = self._file_exists(name + "." + self.source_extension)
if (header_exists and not source_exists) or (source_exists and not header_exists):
ret = _("To keep existing user code, both header and source file must exist.\n"
"(files '%s...'")
return ret%name
if not self._overwrite and header_exists:
# keep all the lines not inside a wxGlade block.
self.previous_source = SourceFileContent(name, self)
else:
# if the file doesn't exist, create it and write the intro
self.previous_source = None
self.output_header = []
self.output_file = []
# isolation directives
oh = os.path.basename(name + "." + self.header_extension).upper().replace( '.', '_' )
self.output_header.append('#ifndef %s\n#define %s\n' % (oh, oh))
self.output_header.append('\n')
for line in self.header_lines:
self.output_header.append(line)
self.output_header.append('\n')
# now, write the tags to store dependencies and extra code
self.output_header.append('<%swxGlade replace dependencies>' % self.nonce)
self.output_header.append('\n')
self.output_header.append('<%swxGlade replace extracode>' % self.nonce)
self.output_header.append('\n')
self.output_file.append('#include "%s.%s"\n\n' % (os.path.basename(name), self.header_extension))
self.output_file.append('<%swxGlade replace extracode>\n' % self.nonce)
self.output_file.append('\n')
def output_header_replace(self, tag, content):
_replace_tag(self.output_header, tag, content)
def finalize(self):
if self.previous_source:
# insert all the new custom classes inside the old file
tag = '<%swxGlade insert new_classes>' % self.nonce
if self.previous_source.new_classes:
code = "".join([c[0] for c in self.previous_source.new_classes])
else:
code = ""
self.previous_source.replace_header(tag, code)
extra_source = "".join([c[1] for c in self.previous_source.new_classes])
# extra code (see the 'extracode' property of top-level widgets)
tag = '<%swxGlade replace extracode>' % self.nonce
code = self._tagcontent( '::extracode', self._current_extra_code_h )
self.previous_source.replace_header(tag, code)
code = self._tagcontent( '::extracode', self._current_extra_code_cpp )
self.previous_source.replace(tag, code)
# --------------------------------------------------------------
# now remove all the remaining <123415wxGlade ...> tags from the source:
# this may happen if we're not generating multiple files, and one of the container class names is changed
tags = re.compile( r'(<%swxGlade replace ([a-zA-Z_]*\w*) (\w+)>)' % self.nonce )
for i,line in enumerate(self.previous_source.header_content):
match = tags.match(line)
if not match: continue
tag = match.groups()
if tag[2] == 'dependencies':
#self._logger.debug('writing dependencies')
deps = set()
for code in self.classes.values():
deps.update(code.dependencies)
lines = self._format_dependencies( deps )
elif tag[2] == 'methods':
lines = ''
else:
lines = '// content of this block (%s) not found: did you rename this class?\n' % tag[2]
self.previous_source.replace_header(tag[0], lines)
# remove all the remaining <123415wxGlade ...> tags in source file XXX make more efficient
self._content_notfound( self.previous_source )
tag_start = r'<%swxGlade add ' % self.nonce
tag_end = r' event_handlers>'
for i, line in enumerate(self.previous_source.content):
if line.startswith(tag_start) and line.endswith(tag_end):
self.previous_source.content[i] = ""
# write the new file contents to disk
header_content = "".join( self.previous_source.header_content )
self.save_file( self.previous_source.name + "." + self.header_extension, header_content, content_only=True )
if extra_source:
extra_source = '\n\n' + extra_source
source_content = "".join( self.previous_source.content )
self.save_file( self.previous_source.name + "." + self.source_extension, source_content + extra_source,
content_only=True )
elif not self.multiple_files:
oh = os.path.basename(self.output_name).upper() + '_H'
self.output_header.append('\n#endif // %s\n' % oh)
# write the list of include files
deps = set()
for code in self.classes.values():
deps.update(code.dependencies)
code = self._format_dependencies( deps )
self.output_header_replace( '<%swxGlade replace dependencies>' % self.nonce, code )
# extra code (see the 'extracode' property of top-level widgets)
tag = '<%swxGlade replace extracode>' % self.nonce
code = self._tagcontent('::extracode', self._current_extra_code_h)
self.output_header_replace( tag, code )
code = self._tagcontent('::extracode', self._current_extra_code_cpp)
self.output_file_replace( tag, code )
self.save_file( self.output_name + "." + self.header_extension, self.output_header, self._app_added )
self.save_file( self.output_name + "." + self.source_extension, self.output_file, self._app_added )
self.output_file = self.output_header = None
def add_app(self, app_attrs, top_win):
# add language specific mappings
self.lang_mapping['filename_top_win_class'] = '%s.%s' % (top_win.klass, self.header_extension)
BaseLangCodeWriter.add_app(self, app_attrs, top_win)
def add_class(self, code_obj):
assert code_obj not in self.classes
try:
builder = self.obj_builders[code_obj.WX_CLASS]
except KeyError:
logging.error('%s', code_obj)
# this is an error, let the exception be raised; the details are logged by the global exception handler
raise
ret = self.classes[code_obj] = self.ClassLines() # ClassLines will collect the code lines incl. children
return ret
def finalize_class(self, code_obj):
# write the collected code for the class and its children
base = code_obj.WX_CLASS
klass = self.classes[code_obj]
classname = code_obj.klass
fmt_klass = self.cn_class(classname)
if self.multiple_files:
# let's see if the file to generate exists, and in this case create a SourceFileContent instance
filename = os.path.join(self.out_dir, classname.replace('::', '_') + "." + self.header_extension)
if self._overwrite or not self._file_exists(filename):
prev_src = None
else:
prev_src = SourceFileContent( os.path.join(self.out_dir, classname), self )
else:
# in this case, previous_source is the SourceFileContent instance
# that keeps info about the single file to generate
prev_src = self.previous_source
if prev_src and classname in prev_src.classes:
is_new = False
else:
# this class wasn't in the previous version of the source (if any)
is_new = True
builder = self.obj_builders[base]
mycn = getattr(builder, 'cn', self.cn)
mycn_f = getattr(builder, 'cn_f', self.cn_f)
# collect all event handlers
event_handlers = klass.event_handlers
for win_id, evt, handler, evt_type in builder.get_event_handlers(code_obj):
event_handlers.append((win_id, mycn(evt), handler, evt_type))
# try to see if there's some extra code to add to this class
extra_code = getattr(builder, 'extracode', getattr(code_obj, 'extracode', "") or "")
if extra_code:
extra_code = re.sub(r'\\n', '\n', extra_code)
extra_code = re.split(re.compile(r'^###\s*$', re.M), extra_code, 1)
klass.extra_code_h.append(extra_code[0])
if len(extra_code) > 1:
klass.extra_code_cpp.append(extra_code[1])
if not is_new:
self.warning( '%s has extra code, but you are not overwriting existing sources:'
' please check that the resulting code is correct!' % code_obj.name )
if not self.multiple_files:
if klass.extra_code_h:
self._current_extra_code_h.append( "".join( klass.extra_code_h[::-1] ) )
if klass.extra_code_cpp:
self._current_extra_code_cpp.append( "".join( klass.extra_code_cpp[::-1] ) )
default_sign = [('wxWindow*', 'parent'), ('wxWindowID', 'id')]
sign = getattr(builder, 'constructor', default_sign)
defaults = []
for t in sign:
if len(t) == 3:
defaults.append(t[2])
else:
defaults.append(None)
tmp_sign = [t[0] + ' ' + t[1] for t in sign]
sign_decl2 = ', '.join(tmp_sign)
for i in range(len(tmp_sign)):
if defaults[i]:
tmp_sign[i] += '=%s' % defaults[i]
sign_decl1 = ', '.join(tmp_sign)
sign_inst = ', '.join([t[1] for t in sign])
# custom base classes support
custom_base = code_obj.check_prop_nodefault('custom_base') and code_obj.custom_base.strip() or None
# the header and code lines
header_buffer = []
source_buffer = []
hwrite = header_buffer.append
swrite = source_buffer.append
# generate constructor code
if is_new:
pass
elif custom_base:
# custom base classes set, but "overwrite existing sources" not
# set. Issue a warning about this
self.warning( '%s has custom base classes, but you are not overwriting existing sources: '
'please check that the resulting code is correct!' % code_obj.name )
if is_new:
# header file
if custom_base:
base = ", public ".join([b.strip() for b in custom_base.split(',')])
hwrite('\nclass %s: public %s {\n' % (fmt_klass, base))
hwrite('public:\n')
# the first thing to add is the enum of the various ids
if self._mark_blocks:
hwrite(self.tabs(1) + '// begin wxGlade: %s::ids\n' % fmt_klass)
ids = klass.ids
# let's try to see if there are extra ids to add to the enum
if hasattr(builder, 'get_ids_code'):
ids.extend(builder.get_ids_code(code_obj))
if ids:
hwrite(self.tabs(1) + 'enum {\n')
for id_name in ids:
hwrite('%s%s,\n' % (self.tabs(2), id_name))
hwrite(self.tabs(1) + '};\n')
if self._mark_blocks:
hwrite(self.tabs(1) + '// end wxGlade\n\n')
# constructor prototype
hwrite(self.tabs(1) + '%s(%s);\n' % (fmt_klass, sign_decl1))
hwrite('\nprivate:\n')
# declarations of the attributes
hwrite('\n')
hwrite('protected:\n')
hwrite(self.tabs(1) + '// begin wxGlade: %s::attributes\n' % fmt_klass)
for o_type, o_name in klass.sub_objs:
hwrite(self.tabs(1) + '%s* %s;\n' % (o_type, o_name))
hwrite(self.tabs(1) + '// end wxGlade\n')
if event_handlers:
t = self.tabs(1)
hwrite('\n' + t + 'DECLARE_EVENT_TABLE();\n')
hwrite('\npublic:\n')
already_there = set()
for win_id, evt, handler, evt_type in event_handlers:
if handler not in already_there:
hwrite('%svirtual void %s(%s &event); // wxGlade: <event_handler>\n' % (t, handler, evt_type))
already_there.add( handler )
hwrite('}; // wxGlade: end class\n\n')
elif prev_src:
if self._mark_blocks:
hwrite(self.tabs(1) + '// begin wxGlade: %s::ids\n' % fmt_klass)
ids = klass.ids
# let's try to see if there are extra ids to add to the enum
if hasattr(builder, 'get_ids_code'):
ids.extend(builder.get_ids_code(code_obj))
if ids:
hwrite(self.tabs(1) + 'enum {\n')
for id_name in ids:
hwrite('%s%s,\n' % (self.tabs(2), id_name))
hwrite(self.tabs(1) + '};\n')
if self._mark_blocks:
hwrite(self.tabs(1) + '// end wxGlade\n')
tag = '<%swxGlade replace %s ids>' % (self.nonce, classname)
if not prev_src.replace_header( tag, "".join(header_buffer) ):
# no ids tag found, issue a warning and do nothing
self.warning("wxGlade ids block not found for %s, ids declarations code NOT generated" % code_obj.name)
# remove methods block if in old file
tag = '<%swxGlade replace %s methods>' % (self.nonce, classname)
prev_src.replace_header(tag, [])
header_buffer = []
hwrite = header_buffer.append
if self._mark_blocks:
hwrite(self.tabs(1) + '// begin wxGlade: %s::attributes\n' % fmt_klass)
for o_type, o_name in klass.sub_objs:
hwrite(self.tabs(1) + '%s* %s;\n' % (o_type, o_name))
if self._mark_blocks:
hwrite(self.tabs(1) + '// end wxGlade\n')
tag = '<%swxGlade replace %s attributes>' % (self.nonce, classname)
if not prev_src.replace_header(tag, "".join(header_buffer)):
# no attributes tag found, issue a warning and do nothing
self.warning( "wxGlade attributes block not found for %s, attributes declarations code NOT generated" %
code_obj.name )
header_buffer = []
hwrite = header_buffer.append
if event_handlers:
already_there = prev_src.event_handlers.get(classname, set())
t = self.tabs(1)
for win_id, evt, handler, evt_type in event_handlers:
if handler not in already_there:
hwrite('%svirtual void %s(%s &event); // wxGlade: <event_handler>\n' % (t, handler, evt_type))
already_there.add( handler )
if classname not in prev_src.event_table_def:
hwrite('\nprotected:\n')
hwrite(self.tabs(1) + 'DECLARE_EVENT_TABLE()\n')
tag = '<%swxGlade event_handlers %s>' % (self.nonce, classname)
if not prev_src.replace_header( tag, "".join(header_buffer) ):
# no events tag found, issue a warning and do nothing
self.warning( "wxGlade events block not found for %s, event table code NOT generated" % code_obj.name )
# source file
tab = self.tabs(1)
# set the window's style
style_p = code_obj.properties.get("style")
if style_p and style_p.value_set != style_p.default_value:
style = mycn_f(style_p.get_string_value())
if style:
sign_inst = sign_inst.replace('style', '%s' % style)
# constructor
if is_new:
base = "%s(%s)" % (base, sign_inst)
if custom_base:
bases = [b.strip() for b in custom_base.split(',')]
if bases:
base = "%s(%s)" % (bases[0], sign_inst)
rest = ", ".join([b + "()" for b in bases[1:]])
if rest:
base += ", " + rest
swrite('\n%s::%s(%s):\n%s%s\n{\n' % (fmt_klass, fmt_klass, sign_decl2, tab, base) )
if self._mark_blocks:
swrite(tab + '// begin wxGlade: %s::%s\n' % (fmt_klass, fmt_klass))
# the optional initial code from the code properties
if not self.preview and code_obj.check_prop("extracode_pre"):
for l in code_obj.properties["extracode_pre"].get_lines():
swrite(tab + l)
# set size here to avoid problems with splitter windows
if 'size' in code_obj.properties and code_obj.properties["size"].is_active():
swrite( tab + self.generate_code_size(code_obj) )
for l in builder.get_properties_code(code_obj):
swrite(tab + l)
for l in klass.init:
swrite(tab + l)
if klass.final:
swrite(tab + "\n")
for l in klass.final:
swrite(tab + l)
for l in builder.get_layout_code(code_obj):
swrite(tab + l)
# the optional final code from the code properties
if not self.preview and code_obj.check_prop("extracode_post"):
for l in code_obj.properties["extracode_post"].get_lines():
swrite(tab + l)
# now check if there are extra lines to add to the constructor
for l in builder.get_init_code(code_obj):
swrite(tab + l)
swrite( self.tmpl_ctor_call_layout % {'tab':tab} )
if self._mark_blocks:
# end tag
swrite('%s%s end wxGlade\n' % (tab, self.comment_sign))
# write class function end statement
if self.tmpl_cfunc_end and is_new:
swrite( self.tmpl_cfunc_end % {'tab':tab} )
# replace code inside existing constructor block
if prev_src and not is_new:
# replace the lines inside the ctor wxGlade block
# with the new ones
tag = '<%swxGlade replace %s %s>' % (self.nonce, classname, classname)
if not prev_src.replace( tag, "".join(source_buffer) ):
# no constructor tag found, issue a warning and do nothing
self.warning( "wxGlade %s::%s block not found, relative code NOT generated" % (fmt_klass, fmt_klass) )
source_buffer = []
swrite = source_buffer.append
# generate code for event table
code_lines = self.generate_code_event_table( code_obj, is_new, tab, prev_src, event_handlers )
if prev_src and not is_new:
tag = '<%swxGlade replace %s event_table>' % (self.nonce, classname)
if not prev_src.replace( tag, "".join(code_lines) ):
# no event table tag found, issue a warning and do nothing
self.warning( "wxGlade %s::event_table block not found, relative code NOT generated" % fmt_klass )
else:
source_buffer.extend(code_lines)
# generate code for event handler stubs
code_lines = self.generate_code_event_handler( code_obj, is_new, tab, prev_src, event_handlers )
# replace code inside existing event handlers
if prev_src and not is_new:
tag = '<%swxGlade add %s event handlers>' % (self.nonce, classname)
if not prev_src.replace( tag, "".join(code_lines) ):
# no event handlers marker found, issue a warning and do nothing
self.warning( "wxGlade %s event handlers marker not found, relative code NOT generated" % fmt_klass )
else:
source_buffer.extend(code_lines)
if not self.multiple_files and prev_src:
# if this is a new class, add its code to the new_classes list of the SourceFileContent instance
if is_new:
prev_src.new_classes.append( ("".join(header_buffer), "".join(source_buffer)) )
return
if self.multiple_files:
if base in self.obj_builders:
klass.dependencies.update( getattr(self.obj_builders[base], 'import_modules', []) )
if prev_src:
tag = '<%swxGlade insert new_classes>' % self.nonce
prev_src.replace_header(tag, "")
# insert the module dependencies of this class
# WARNING: there's a double space ' ' between 'replace' and 'dependencies' in the tag below,
# because there is no class name (see SourceFileContent, line ~147)
tag = '<%swxGlade replace dependencies>' % self.nonce
code = self._format_dependencies(klass.dependencies)
prev_src.replace_header(tag, code)
# insert the extra code of this class
extra_code_h = "".join(klass.extra_code_h[::-1])
extra_code_cpp = "".join(klass.extra_code_cpp[::-1])
# if there's extra code but we are not overwriting existing sources, warn the user
if extra_code_h or extra_code_cpp:
self.warning( '%s (or one of its children) has extra code classes, but you are not overwriting '
'existing sources: please check that the resulting code is correct!' % code_obj.name )
extra_code_h = self._tagcontent("::extracode", extra_code_h)
extra_code_cpp = self._tagcontent("::extracode", extra_code_cpp)
tag = '<%swxGlade replace extracode>' % self.nonce
prev_src.replace_header(tag, extra_code_h)
prev_src.replace(tag, extra_code_cpp)
# store the new file contents to disk
name = os.path.join(self.out_dir, classname)
self.save_file( name +"."+ self.header_extension, "".join(prev_src.header_content), content_only=True )
self.save_file( name +"."+ self.source_extension, "".join(prev_src.content), content_only=True )
return
# create the new source file
header_file = os.path.join(self.out_dir, classname + "." + self.header_extension)
source_file = os.path.join(self.out_dir, classname + "." + self.source_extension)
hout = []
sout = []
# header file ----------------------------------------------------------------------------------------------
# isolation directives
hn = os.path.basename(header_file).upper().replace('.', '_')
hout.append('#ifndef %s\n#define %s\n' % (hn, hn))
hout.append('\n')
# write the common lines
hout.extend( self.header_lines )
hout.append('\n')
# write the module dependencies for this class
code = self._format_dependencies(klass.dependencies)
hout.append(code)
hout.append('\n')
# insert the extra code of this class
extra_code_h = "".join(klass.extra_code_h[::-1])
extra_code_h = self._tagcontent('::extracode', extra_code_h)
hout.append(extra_code_h)
hout.append('\n')
# write the class body
for line in header_buffer:
hout.append(line)
hout.append('\n#endif // %s\n' % hn)
# source file ----------------------------------------------------------------------------------------------
# write the common lines
sout.append(self.header_lines[0])
sout.append('#include "%s"\n\n' % os.path.basename(header_file))
# insert the extra code of this class
extra_code_cpp = "".join(klass.extra_code_cpp[::-1])
extra_code_cpp = self._tagcontent('::extracode', extra_code_cpp)
sout.append(extra_code_cpp)
sout.append('\n')
# write the class implementation
sout.extend(source_buffer)
# store source to disk
self.save_file(header_file, hout)
self.save_file(source_file, sout)
else: # not self.multiple_files
# write the class body onto the single source file
self.output_header.extend(header_buffer)
self.output_file.extend(source_buffer)
def add_object(self, klass, parent, parent_builder, obj):
# get the widget builder instance
builder = self._get_object_builder(klass, obj)
if not builder: return None
try:
init, ids, final = builder.get_code(obj)
except:
print(obj)
raise # this shouldn't happen
if not obj.IS_SIZER: # the object is a wxWindow instance
if obj.check_prop_truth("extracode_pre"):
init = obj.properties["extracode_pre"].get_lines() + init
if obj.check_prop_truth("extracode_post"):
init += obj.properties["extracode_post"].get_lines()
if obj.check_prop_truth('extraproperties'): # insert these only after extracode_post
init += self.generate_code_extraproperties(obj)
mycn = getattr(builder, 'cn', self.cn)
for win_id, evt, handler, evt_type in builder.get_event_handlers(obj):
klass.event_handlers.append( (win_id, mycn(evt), handler, evt_type) )
# try to see if there's some extra code to add to this class
extra_code = getattr(builder, 'extracode', getattr(obj, 'extracode', "") or "" )
if extra_code:
extra_code = re.sub(r'\\n', '\n', extra_code)
extra_code = re.split(re.compile(r'^###\s*$', re.M), extra_code, 1)
klass.extra_code_h.append(extra_code[0])
if len(extra_code) > 1:
klass.extra_code_cpp.append(extra_code[1])
# if we are not overwriting existing source, warn the user about the presence of extra code
if not self.multiple_files and self.previous_source:
self.warning( '%s has extra code, but you are not overwriting existing sources: please check '
'that the resulting code is correct!' % obj.name )
klass.ids.extend(ids)
if self.store_as_attr(obj):
if obj.check_prop("instance_class"):
klassname = obj.instance_class
else:
klassname = obj.get_prop_value("class", obj.WX_CLASS)
klass.sub_objs.append( (klassname, obj.name) )
klass.init.extend(init)
if parent_builder: # add to sizer or notebook
klass.init.extend( parent_builder.get_code_per_child(parent, obj) )
klass.final[:0] = final
if self.multiple_files and obj.IS_CLASS:
klass.dependencies.append(obj.klass)
else:
if obj.WX_CLASS in self.obj_builders:
headers = getattr(self.obj_builders[obj.WX_CLASS], 'import_modules', [])
klass.dependencies.update(headers)
return builder
def generate_code_event_handler(self, code_obj, is_new, tab, prev_src, event_handlers):
"""Generate the event handler stubs
Parameters:
code_obj: Object to generate code for (CodeObject)
is_new: Indicates if previous source code exists (bool)
tab: Indentation of function body (str)
prev_src: Previous source code (SourceFileContent)
event_handlers: List of event handlers
see: tmpl_func_event_stub"""
code_lines = []
swrite = code_lines.append
if not event_handlers:
return []
tmpl_handler = """
void %(klass)s::%(handler)s(%(evt_type)s &event) // wxGlade: %(klass)s.<event_handler>
{
%(tab)sevent.Skip();
%(tab)s// notify the user that he hasn't implemented the event handler yet
%(tab)swxLogDebug(wxT("Event handler (%(klass)s::%(handler)s) not implemented yet"));
}
"""
if prev_src:
already_there = prev_src.event_handlers.get(code_obj.klass, set())
else:
already_there = set()
for win_id, event, handler, evt_type in event_handlers:
if handler not in already_there:
swrite( tmpl_handler % {'evt_type': evt_type, 'handler': handler, 'klass': code_obj.klass, 'tab': tab} )
already_there.add( handler )
if is_new or not prev_src:
swrite('\n\n')
swrite('// wxGlade: add %s event handlers\n' % code_obj.klass)
if is_new or not prev_src:
swrite('\n')
return code_lines
def generate_code_event_table(self, code_obj, is_new, tab, prev_src, event_handlers):
"""Generate code for event table declaration.
code_obj: Object to generate code for (CodeObject)
is_new: Indicates if previous source code exists (bool)
tab: Indentation of function body (str)
prev_src: Previous source code (SourceFileContent)
event_handlers: List of event handlers (strings)"""
code_lines = []
write = code_lines.append
if not event_handlers:
return code_lines
if prev_src and code_obj.klass in prev_src.event_table_decl:
has_event_table = True
else:
has_event_table = False
if is_new or not has_event_table:
write('\nBEGIN_EVENT_TABLE(%s, %s)\n' % (code_obj.klass, code_obj.WX_CLASS))
write(tab + '// begin wxGlade: %s::event_table\n' % code_obj.klass)
for obj, event, handler, evt_type in event_handlers:
if obj is None: continue
if isinstance(obj, str):
win_id = obj
else:
win_id = self.generate_code_id(obj)[1]
if 'EVT_NAVIGATION_KEY' in event:
tmpl = '%(tab)s%(event)s(%(klass)s::%(handler)s)\n'
else:
tmpl = '%(tab)s%(event)s(%(win_id)s, %(klass)s::%(handler)s)\n'
details = { 'tab': tab, 'event': event, 'win_id': win_id, 'klass': code_obj.klass, 'handler': handler }
write(tmpl % details)
write(tab + '// end wxGlade\n')
if is_new or not has_event_table:
write('END_EVENT_TABLE();\n\n')
return code_lines
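# Illustrative output of the event table generated above for a hypothetical "MyFrame"
# class with one hypothetical button handler (all names below are assumptions, not
# taken from any real project):
#
#   BEGIN_EVENT_TABLE(MyFrame, wxFrame)
#       // begin wxGlade: MyFrame::event_table
#       EVT_BUTTON(ID_OK, MyFrame::on_ok_clicked)
#       // end wxGlade
#   END_EVENT_TABLE();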
def generate_code_id(self, obj, id=None):
if id is None:
id = obj.window_id
if not id:
if obj is not None and obj.check_prop_truth("stockitem"):
return '', "wxID_" + obj.stockitem
return '', 'wxID_ANY'
id = str(id)
tokens = id.split('=', 1)
if len(tokens) != 2:
return '', tokens[0] # we assume name is declared elsewhere
name, val = tokens
if not name:
return '', val
name = name.strip()
val = val.strip()
if val == '?':
val = self.generated_ids.get(name)
if val is None:
val = 'wxID_HIGHEST + %d' % self.last_generated_id
self.last_generated_id += 1
self.generated_ids[name] = val
else:
val = val
return '%s = %s' % (name, val), name
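# Illustrative input/output pairs for the id handling above (hypothetical names; the
# numeric value substituted for '?' depends on how many ids were generated before):
#   window_id ""             -> ('', 'wxID_ANY')  (or ('', 'wxID_<stockitem>') if a stock item is set)
#   window_id "ID_OK"        -> ('', 'ID_OK')                       name assumed declared elsewhere
#   window_id "ID_CUSTOM=42" -> ('ID_CUSTOM = 42', 'ID_CUSTOM')
#   window_id "ID_AUTO=?"    -> ('ID_AUTO = wxID_HIGHEST + <n>', 'ID_AUTO')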
def generate_code_size(self, obj):
objname = self.format_generic_access(obj)
if obj.IS_CLASS:
name2 = 'this'
else:
name2 = obj.name
size = obj.properties["size"].get_string_value()
use_dialog_units = (size[-1] == 'd')
method = 'SetMinSize' if obj.parent_window else 'SetSize'
if use_dialog_units:
return '%s%s(wxDLG_UNIT(%s, wxSize(%s)));\n' % (objname, method, name2, size[:-1])
return '%s%s(wxSize(%s));\n' % (objname, method, size)
def quote_path(self, s):
return 'wxT(%s)' % super(CPPCodeWriter, self).quote_path(s)
def _quote_str(self, s):
if self._use_gettext:
return '_("%s")' % s
return 'wxT("%s")' % s
def format_generic_access(self, obj):
if obj.IS_CLASS:
return ''
return '%s->' % obj.name
def _format_dependencies(self, dependencies):
"Format a list of header files for the dependencies output"
dep_list = []
for dependency in sorted(dependencies): # unique and sorted
if dependency and ('"' != dependency[0] != '<'):
dep_list.append('#include "%s.h"\n' % dependency)
else:
dep_list.append('#include %s\n' % dependency)
return self._tagcontent( '::dependencies', dep_list )
writer = CPPCodeWriter() # The code writer is an instance of CPPCodeWriter
language = writer.language # Language generated by this code generator
|
import sys
import os
from io import StringIO
from datetime import datetime
import unittest
from unittest.mock import patch
sys.path.append(os.path.abspath("./src/"))
from calendarApp.models import Event, Calendar
class CalendarModelTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data1 = {
"name": "Test Event 1",
"start_time": "01/01/2000 00:00:00",
"end_time": "01/01/2001 00:00:00"
}
cls.data2 = {
"name": "Test Event 2",
"start_time": "01/01/2001 00:00:00",
"end_time": "01/01/2002 00:00:00"
}
@classmethod
def tearDownClass(cls):
del cls.data1
del cls.data2
def setUp(self):
self.calendar = Calendar("Test")
def tearDown(self):
del self.calendar
def test_event_add(self):
# Test Configuration and Setup
with patch('sys.stdout', StringIO()) as print_output:
# Test Function
self.calendar.add_event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"])
calendar_event = self.calendar.schedule[0]
# Test Assertions
self.assertEqual(
f"[INFO] Event {self.data1['name']} added", print_output.getvalue().rstrip())
self.assertEqual(self.data1["name"], calendar_event.name)
def test_event_delete(self):
# Test Configuration and Setup
self.calendar.schedule = [
Event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"])
]
calendar_event = self.calendar.schedule[0]
with patch('sys.stdout', StringIO()) as print_output:
# Test Function
self.calendar.delete_event([str(calendar_event.id)])
# Test Assertions
self.assertEqual(
f"[INFO] Event(s) ['{calendar_event.id}'] removed", print_output.getvalue().rstrip())
self.assertFalse(self.calendar.schedule)
def test_event_order(self):
# Test Configuration and Setup
self.calendar.schedule = [
Event(
self.data2["name"], self.data2["start_time"], self.data2["end_time"]),
Event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"])
]
# Test Function
self.calendar.order_events()
# Test Assertions
self.assertLess(
self.calendar.schedule[0].start_time, self.calendar.schedule[1].start_time)
def test_event_print(self):
# Test Configuration and Setup
self.calendar.schedule = [
Event(
self.data1["name"], self.data1["start_time"], self.data1["end_time"]),
Event(
self.data2["name"], self.data2["start_time"], self.data2["end_time"])
]
# Test Assertions
with patch('sys.stdout', StringIO()) as print_output:
self.calendar.print_events()
self.assertTrue(self.data1["name"] in print_output.getvalue())
self.assertTrue(self.data2["name"] in print_output.getvalue())
if __name__ == "__main__":
unittest.main()
|
"""Visual Studio Helper Utils."""
#===============================================================================
# Imports
#===============================================================================
import uuid
#===============================================================================
# Globals
#===============================================================================
vcxproj_template = """\
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="PGInstrument|Win32">
<Configuration>PGInstrument</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="PGInstrument|x64">
<Configuration>PGInstrument</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="PGUpdate|Win32">
<Configuration>PGUpdate</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="PGUpdate|x64">
<Configuration>PGUpdate</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>%(guid)s</ProjectGuid>
<RootNamespace>%(name)s</RootNamespace>
<Keyword>Win32Proj</Keyword>
</PropertyGroup>
<PropertyGroup Label="UserMacros">
<%(dirname_macro_name)s>%(dirname_macro_value)s</%(dirname_macro_name)s>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
<WholeProgramOptimization>true</WholeProgramOptimization>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<CharacterSet>NotSet</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
<Import Project="%(pcbuild_prefix)spgupdate.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
<Import Project="%(pcbuild_prefix)spginstrument.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(pcbuild_prefix)s%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(name)s_debug.props" />
<Import Project="%(pcbuild_prefix)spyd_d.props" />
<Import Project="%(pcbuild_prefix)sdebug.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
<Import Project="%(pcbuild_prefix)sx64.props" />
<Import Project="%(pcbuild_prefix)spgupdate.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
<Import Project="%(pcbuild_prefix)sx64.props" />
<Import Project="%(pcbuild_prefix)spginstrument.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(pcbuild_prefix)spyd.props" />
<Import Project="%(pcbuild_prefix)sx64.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
<Import Project="%(name)s.props" />
<Import Project="%(name)s_debug.props" />
<Import Project="%(pcbuild_prefix)spyd_d.props" />
<Import Project="%(pcbuild_prefix)sdebug.props" />
<Import Project="%(pcbuild_prefix)sx64.props" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup>
<_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
<CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
<CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
<CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Midl>
<TargetEnvironment>X64</TargetEnvironment>
</Midl>
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Midl>
<TargetEnvironment>X64</TargetEnvironment>
</Midl>
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|Win32'">
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='PGInstrument|x64'">
<Midl>
<TargetEnvironment>X64</TargetEnvironment>
</Midl>
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|Win32'">
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='PGUpdate|x64'">
<Midl>
<TargetEnvironment>X64</TargetEnvironment>
</Midl>
<ClCompile>
<AdditionalIncludeDirectories>%%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>%(exception_handling)s
</ClCompile>
</ItemDefinitionGroup>
%(includes)s
%(compiles)s
%(resources)s
%(others)s<ItemGroup>
<ProjectReference Include="pythoncore.vcxproj">
<Project>{cf7ac3d1-e2df-41d2-bea6-1e2556cdea26}</Project>
</ProjectReference>
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>"""
vcxproj_filters_template = """\
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
%(source_filterdef)s
%(include_filterdef)s
%(resource_filterdef)s
%(python_filterdef)s
%(cython_filterdef)s
%(other_filterdef)s
</ItemGroup>
%(source_filters)s
%(include_filters)s
%(resource_filters)s
%(python_filters)s
%(cython_filters)s
%(other_filters)s
</Project>"""
props_template = """\
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>%(compiles_props)s%(additional_include_dirs)s
</ClCompile>
<ResourceCompile>%(resources_props)s
</ResourceCompile>
</ItemDefinitionGroup>
</Project>"""
props_debug_template = """\
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>%(compiles_debug_props)s%(additional_include_dirs)s
</ClCompile>
<ResourceCompile>%(resources_debug_props)s
</ResourceCompile>
</ItemDefinitionGroup>
</Project>"""
guids_template = """\
guid = '%s'
source_filterdef_guid = '%s'
include_filterdef_guid = '%s'
other_filterdef_guid = '%s'
python_filterdef_guid = '%s'
cython_filterdef_guid = '%s'
resource_filterdef_guid = '%s'
"""
num_guids = guids_template.count('%s')
#===============================================================================
# Helper Methods
#===============================================================================
def gen_guids():
t = guids_template
uuids = [
'{%s}' % str(uuid.uuid1()).upper()
for _ in range(0, num_guids)
]
return t % tuple(uuids)
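# A minimal usage sketch: gen_guids() returns the filled-in guids_template, which a
# build helper could write next to a generated project. The output file name below is
# an assumption for illustration only:
#
#   guids_source = gen_guids()
#   with open('project_guids.py', 'w') as f:
#       f.write(guids_source)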
#===============================================================================
# Helper Classes
#===============================================================================
# vim:set ts=8 sw=4 sts=4 tw=0 et :
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open("requirements.txt") as f:
required = f.read().splitlines()
setuptools.setup(
name="pymeritrade",
version="0.1.4",
author="Shrivu Shankar",
author_email="shrivu1122+pymeritrade@gmail.com",
description="A Python API for TD Ameritrade.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/sshh12/pymeritrade",
packages=setuptools.find_packages(),
install_requires=required,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
import threading
import socket
import sys
import getopt
from log import logger
from Codecs.AdcpCodec import AdcpCodec
from Comm.AdcpSerialPortServer import AdcpSerialPortServer
class DecodeSerialData:
def __init__(self, tcp_port, comm_port, baud):
"""
Initialize the thread to read the data from the TCP port.
"""
self.is_alive = True
self.raw_serial_socket = None
self.serial_server_thread = None
# Create the codec
self.codec = AdcpCodec()
# Create a serial port server to read data from the
# serial port and pass it on TCP
self.serial_server = AdcpSerialPortServer(str(tcp_port),
comm_port,
baud)
# Start a tcp connection to monitor incoming data and decode
self.serial_server_thread = threading.Thread(name='AdcpDecoder', target=self.create_raw_serial_socket, args=(tcp_port,))
self.serial_server_thread.start()
def create_raw_serial_socket(self, port):
"""
Connect to the ADCP serial server. This TCP server outputs data from
the serial port. Start reading the data.
"""
try:
# Create socket
self.raw_serial_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.raw_serial_socket.connect(('localhost', int(port)))
self.raw_serial_socket.settimeout(1) # Set timeout to stop thread if terminated
# Start to read the raw data
self.read_tcp_socket()
except ConnectionRefusedError as err:
logger.error("Serial Send Socket: ", err)
exit()
except Exception as err:
logger.error("Serial Send Socket: Error opening socket: %s", err)
exit()
def read_tcp_socket(self):
"""
Read the data from the TCP port. This is the raw data from the serial port.
"""
while self.is_alive:
try:
# Read data from socket
data = self.raw_serial_socket.recv(4096)
# If data exist process
if len(data) > 0:
# Send the data received to the codec
self.codec.add(data)
except socket.timeout:
# Just a socket timeout, continue on
pass
except Exception as e:
logger.error("Exception in reading data.", e)
self.stop_adcp_server()
print("Read Thread turned off")
def stop_adcp_server(self):
"""
Stop the ADCP Serial TCP server
"""
# Stop the thread loop
self.is_alive = False
if self.serial_server is not None:
self.serial_server.close()
logger.debug("serial server stopped")
else:
logger.debug('No serial connection')
# Close the socket
self.raw_serial_socket.close()
# Stop the server thread
if self.serial_server_thread is not None:
self.serial_server_thread.join()
# Close the open file writer, if one exists (this class does not define close_file_write itself)
if hasattr(self, "close_file_write"): self.close_file_write()
logger.debug("Stop the Recorder")
def main(argv):
tcp_port = "55056"
comm_port = '/dev/tty.usbserial-FT0ED8ZR'
baud = 115200
try:
opts, args = getopt.getopt(argv,"hlt:c:b:", [])
except getopt.GetoptError:
print('test_DecodeSerialData.py -t <tcp_port> -c <comm> -b <baud>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('test_DecodeSerialData.py -t <tcp_port> -c <comm> -b <baud>')
sys.exit()
elif opt in ("-l"):
print("Available Serial Ports:")
AdcpSerialPortServer.list_serial_ports()
exit()
elif opt in ('-t'):
tcp_port = arg
elif opt in ("-c"):
comm_port = arg
elif opt in ("-b"):
baud = int(arg)
# Get a list of all the serial ports available
print("Available Serial Ports:")
serial_list = AdcpSerialPortServer.list_serial_ports()
print("TCP Port: " + tcp_port)
print("Comm Port: " + comm_port)
print("Baud rate: " + str(baud))
# Verify a good serial port was given
if comm_port in serial_list:
# Run serial port
sdr = DecodeSerialData(tcp_port, comm_port, baud)
sdr.stop_adcp_server()
else:
print("----------------------------------------------------------------")
print("BAD SERIAL PORT GIVEN")
print("Please use -c to give a good serial port.")
print("-l will give you a list of all available serial ports.")
if __name__ == "__main__":
main(sys.argv[1:])
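# Example invocation (the values mirror the defaults above and are illustrative only;
# run with -l to list the serial ports available on your machine):
#   python test_DecodeSerialData.py -t 55056 -c /dev/tty.usbserial-FT0ED8ZR -b 115200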
|
r"""Setup fixtures for testing :py:class:`lmp.model.LSTMModel`."""
import pytest
import torch
from lmp.model import LSTMModel
from lmp.tknzr import BaseTknzr
@pytest.fixture
def lstm_model(
tknzr: BaseTknzr,
d_emb: int,
d_hid: int,
n_hid_lyr: int,
n_pre_hid_lyr: int,
n_post_hid_lyr: int,
p_emb: float,
p_hid: float,
) -> LSTMModel:
r"""Example ``LSTMModel`` instance."""
return LSTMModel(
d_emb=d_emb,
d_hid=d_hid,
n_hid_lyr=n_hid_lyr,
n_pre_hid_lyr=n_pre_hid_lyr,
n_post_hid_lyr=n_post_hid_lyr,
p_emb=p_emb,
p_hid=p_hid,
tknzr=tknzr,
)
@pytest.fixture
def batch_prev_tkids(lstm_model: LSTMModel) -> torch.Tensor:
r"""Example input batch of token ids."""
# Shape: (2, 3).
return torch.randint(
low=0,
high=lstm_model.emb.num_embeddings,
size=(2, 3),
)
@pytest.fixture
def batch_next_tkids(
lstm_model: LSTMModel,
batch_prev_tkids: torch.Tensor,
) -> torch.Tensor:
r"""Example target batch of token ids."""
# Same shape as `batch_prev_tkids`.
return torch.cat(
[
batch_prev_tkids[..., :-1],
torch.randint(
low=0,
high=lstm_model.emb.num_embeddings,
size=(batch_prev_tkids.shape[0], 1),
),
],
dim=1,
)
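# A minimal sketch of a test that could consume the fixtures above (it would normally
# live in a test module alongside this fixtures file). It only checks the batch shapes
# guaranteed by the constructions above and assumes nothing about the LSTMModel API.
def test_batch_tkids_shapes(
    batch_prev_tkids: torch.Tensor,
    batch_next_tkids: torch.Tensor,
) -> None:
    r"""Both example batches share the shape ``(2, 3)``."""
    assert batch_prev_tkids.shape == (2, 3)
    assert batch_next_tkids.shape == batch_prev_tkids.shape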
|
"""add cityname indexes for filtering
Revision ID: b4eea63fd165
Revises: 850af1d21f5e
Create Date: 2022-05-05 17:39:57.826059
"""
import os
from alembic import op
here = os.path.dirname(os.path.realpath(__file__))
# revision identifiers, used by Alembic.
revision = "b4eea63fd165"
down_revision = "850af1d21f5e"
branch_labels = None
depends_on = None
revision_dir = f"{here}/{revision}"
# idea from https://github.com/tbobm/alembic-sequeled
def process_migration(script_name: str):
filename = f"{revision_dir}/{script_name}.sql"
with open(filename) as sql_file: query = sql_file.read()
if len(query) > 0:
op.execute(query)
def upgrade():
process_migration("upgrade")
def downgrade():
process_migration("downgrade")
|
#
# Generated with ExtremeValueBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from sima.sima.blueprints.moao import MOAOBlueprint
class ExtremeValueBlueprint(MOAOBlueprint):
""""""
def __init__(self, name="ExtremeValue", package_path="sima/metocean", description=""):
super().__init__(name,package_path,description)
self.attributes.append(Attribute("name","string","",default=""))
self.attributes.append(Attribute("description","string","",default=""))
self.attributes.append(Attribute("_id","string","",default=""))
self.attributes.append(BlueprintAttribute("scriptableValues","sima/sima/ScriptableValue","",True,Dimension("*")))
self.attributes.append(Attribute("extreme","number","",default=0.0))
self.attributes.append(Attribute("returnPeriod","number","",default=0.0))
|
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
def get_args():
# create argument parser
parser = argparse.ArgumentParser()
# parameter for problem
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--benchmark_id', type=int, default=0)
parser.add_argument('--rmp', type=float, default=0.3)
# parse args
args = parser.parse_args()
# add other args
return args
ROOT = '../../result'
def load(args):
folder = os.path.join(ROOT, '{}/{}_{}'.format(args.benchmark_id, args.algorithm, args.rmp))
Fitness = []
for name in os.listdir(folder):
path = os.path.join(folder, name)
if 'ucb' in name:
y = np.load(path)
Fitness.append(y)
return np.array(Fitness)
def get_label(args):
return '{}_{}'.format(args.algorithm, args.benchmark_id)
def plot(Fitness, args):
cs = [
['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
['r', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
['b', 'r', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
['b', 'b', 'r', 'r', 'b', 'b', 'b', 'b', 'b'],
['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
['b', 'b', 'b', 'r', 'b', 'b', 'b', 'b', 'b'],
['b', 'b', 'b', 'b', 'b', 'b', 'b', 'b', 'b'],
]
label = get_label(args)
Fitness = Fitness[:, :, args.source]
mean_fitness = np.mean(Fitness, axis=0)
i = 0
for target in range(mean_fitness.shape[1]):
if target != args.source:
plt.plot(mean_fitness[:, target], label='T{}'.format(target+1), color=cs[args.source][i], linewidth=0.3)
plt.ylabel('UCB value')
i += 1
def main():
# get args
args = get_args()
# plot each algorithm
args.algorithm = 'MTO'
Fitness = load(args)
for source in range(10):
args.source = source
plot(Fitness, args)
plt.legend()
plt.ylim((0, 2))
plt.savefig('plot/ucb/{}.eps'.format(source + 1), dpi=300)
plt.savefig('plot/ucb/{}.png'.format(source + 1), dpi=300)
plt.clf()
plt.cla()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
"""Main function for the DOVPN project."""
import argparse
import logging
import os
import yaml
import vpnorchestrator
def main():
"""Main function that sets up script to run.
Handles arguments, logging, and configuration before passing control
to the orchestrator object."""
parser = argparse.ArgumentParser(description='Manage a DigitalOcean VPN.')
parser.add_argument('-c', '--config', default="config.yaml",
help='configuration file location')
parser.add_argument('-r', '--remove', action='store_true',
help='remove all related DigitalOcean droplets and keys, and quit')
parser.add_argument('-v', '--verbose', action='store_true',
help="enable verbose output")
parser.add_argument('-d', '--debug', action='store_true',
help="enable verbose output with HTTP requests (implies -v)")
args = parser.parse_args()
log_format = "%(asctime)s %(levelname)8s: %(message)s"
if args.debug:
import http.client as http_client
http_client.HTTPConnection.debuglevel = 1
logging.basicConfig(format=log_format, level=logging.DEBUG)
elif args.verbose:
logging.basicConfig(format=log_format, level=logging.DEBUG)
else:
logging.basicConfig(format=log_format, level=logging.INFO)
if os.geteuid() != 0:
logging.critical("You are not root!")
exit(1)
if not os.path.isfile(args.config):
logging.critical("Config file {} does not exist.".format(args.config))
exit(1)
logging.info("Loading configuration file {}".format(args.config))
with open(args.config, "r") as config_file:
config_yaml = yaml.load(config_file, Loader=yaml.FullLoader)
if args.remove:
logging.info("Removing all DigitalOcean droplets and keys")
orch = vpnorchestrator.VpnOrchestrator(config_yaml)
orch.clean()
exit(0)
# Build the orchestrator first so the cleanup path below always has a valid reference.
orch = vpnorchestrator.VpnOrchestrator(config_yaml)
try:
orch.start()
orch.wait()
orch.teardown()
except Exception:
orch.teardown()
raise
if __name__ == "__main__":
main()
|
from argparse import Namespace
import csv
from logging import Logger
import os
from pprint import pformat
from typing import List
import numpy as np
from tensorboardX import SummaryWriter
import torch
from tqdm import trange
import pickle
from torch.optim.lr_scheduler import ExponentialLR
from .evaluate import evaluate, evaluate_predictions
from .predict import predict, save_predictions
from .train import train
from chemprop.data import StandardScaler
from chemprop.data.utils import flip_data, get_class_sizes, get_data, get_task_names, split_data, split_loocv
from chemprop.models import build_model
from chemprop.nn_utils import param_count
from chemprop.utils import build_optimizer, build_lr_scheduler, get_loss_func, get_metric_func, load_checkpoint,\
makedirs, save_checkpoint
def run_training(args: Namespace, logger: Logger = None) -> List[float]:
"""
Trains a model and returns test scores on the model checkpoint with the highest validation score.
:param args: Arguments.
:param logger: Logger.
:return: A list of ensemble scores for each task.
"""
if logger is not None:
debug, info = logger.debug, logger.info
else:
debug = info = print
# Set GPU
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
# Print args
debug(pformat(vars(args)))
# Get data
debug('Loading data')
args.task_names = get_task_names(args.data_path, args.data_format)
data = get_data(path=args.data_path, args=args, logger=logger)
args.num_tasks = data.num_tasks()
args.features_size = data.features_size()
debug(f'Number of tasks = {args.num_tasks}')
# Split data
debug(f'Splitting data with seed {args.seed}')
if args.separate_test_path:
test_data = get_data(path=args.separate_test_path, args=args, features_path=args.separate_test_features_path, logger=logger)
if args.separate_val_path:
val_data = get_data(path=args.separate_val_path, args=args, features_path=args.separate_val_features_path, logger=logger)
if args.separate_val_path and args.separate_test_path:
train_data = data
elif args.separate_val_path:
train_data, _, test_data = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.0, 0.2), seed=args.seed, args=args, logger=logger)
elif args.separate_test_path:
train_data, val_data, _ = split_data(data=data, split_type=args.split_type, sizes=(0.8, 0.2, 0.0), seed=args.seed, args=args, logger=logger)
elif args.split_type == 'loocv':
train_data, val_data, test_data = split_loocv(data=data, args=args, logger=logger)
else:
train_data, val_data, test_data = split_data(data=data, split_type=args.split_type, sizes=args.split_sizes, seed=args.seed, args=args, logger=logger)
if args.dataset_type == 'classification':
class_sizes = get_class_sizes(test_data)
debug('Class sizes in test set')
for i, task_class_sizes in enumerate(class_sizes):
debug(f'{args.task_names[i]} '
f'{", ".join(f"{cls}: {size * 100:.2f}%" for cls, size in enumerate(task_class_sizes))}')
if not args.train_all and task_class_sizes == 0: # TODO: only works for just 1 property prediction task
debug('Moved to next epoch due to homogeneous targets in test set.')
return [float('nan')]
if args.save_smiles_splits:
with open(args.data_path, 'r') as f:
reader = csv.reader(f)
header = next(reader)
lines_by_smiles = {}
indices_by_smiles = {}
for i, line in enumerate(reader):
smiles = (line[0], line[1])
lines_by_smiles[smiles] = line
indices_by_smiles[smiles] = i
all_split_indices = []
for dataset, name in [(train_data, 'train'), (val_data, 'val'), (test_data, 'test')]:
with open(os.path.join(args.save_dir, name + '_smiles.csv'), 'w') as f:
writer = csv.writer(f)
writer.writerow(['smiles'])
for smiles in dataset.smiles():
writer.writerow([smiles])
with open(os.path.join(args.save_dir, name + '_full.csv'), 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
for smiles in dataset.smiles():
writer.writerow(lines_by_smiles[smiles])
split_indices = []
for smiles in dataset.smiles():
split_indices.append(indices_by_smiles[smiles])
split_indices = sorted(split_indices)
all_split_indices.append(split_indices)
with open(os.path.join(args.save_dir, 'split_indices.pckl'), 'wb') as f:
pickle.dump(all_split_indices, f)
if args.symmetric:
train_data = flip_data(train_data)
if args.features_scaling:
drug_scaler, cmpd_scaler = train_data.normalize_features(replace_nan_token=0)
val_data.normalize_features(drug_scaler, cmpd_scaler)
test_data.normalize_features(drug_scaler, cmpd_scaler)
else:
drug_scaler, cmpd_scaler = None, None
args.train_data_size = len(train_data)
debug(f'Total size = {len(data):,} | '
f'train size = {len(train_data):,} | val size = {len(val_data):,} | test size = {len(test_data):,}')
# Initialize scaler and scale training targets by subtracting mean and dividing standard deviation (regression only)
if args.dataset_type == 'regression':
debug('Fitting scaler')
train_smiles, train_targets = train_data.smiles(), train_data.targets()
scaler = StandardScaler().fit(train_targets)
scaled_targets = scaler.transform(train_targets).tolist()
train_data.set_targets(scaled_targets)
else:
scaler = None
# Get loss and metric functions
loss_func = get_loss_func(args)
metric_func = get_metric_func(metric=args.metric)
# Set up test set evaluation
test_smiles, test_targets = test_data.smiles(), test_data.targets()
if args.dataset_type == 'multiclass':
sum_test_preds = np.zeros((len(test_smiles), args.num_tasks, args.multiclass_num_classes))
else:
sum_test_preds = np.zeros((len(test_smiles), args.num_tasks))
# Train ensemble of models
for model_idx in range(args.ensemble_size):
# Tensorboard writer
save_dir = os.path.join(args.save_dir, f'model_{model_idx}')
makedirs(save_dir)
try:
writer = SummaryWriter(log_dir=save_dir)
except:
writer = SummaryWriter(logdir=save_dir)
# Load/build model
if args.checkpoint_paths is not None:
debug(f'Loading model {model_idx} from {args.checkpoint_paths[model_idx]}')
model = load_checkpoint(args.checkpoint_paths[model_idx], current_args=args, logger=logger)
else:
debug(f'Building model {model_idx}')
model = build_model(args)
debug(model)
debug(f'Number of parameters = {param_count(model):,}')
if args.cuda:
debug('Moving model to cuda')
model = model.cuda()
# Ensure that model is saved in correct location for evaluation if 0 epochs
save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, drug_scaler, cmpd_scaler, args)
# Optimizers
optimizer = build_optimizer(model, args)
# Learning rate schedulers
scheduler = build_lr_scheduler(optimizer, args)
# Run training
best_score = float('inf') if args.minimize_score else -float('inf')
best_epoch, n_iter = 0, 0
for epoch in trange(args.epochs):
debug(f'Epoch {epoch}')
n_iter = train(
model=model,
data=train_data,
loss_func=loss_func,
optimizer=optimizer,
scheduler=scheduler,
args=args,
n_iter=n_iter,
logger=logger,
writer=writer
)
if isinstance(scheduler, ExponentialLR):
scheduler.step()
val_scores, val_loss = evaluate(
model=model,
data=val_data,
loss_func=loss_func,
num_tasks=args.num_tasks,
metric_func=metric_func,
batch_size=args.batch_size,
dataset_type=args.dataset_type,
scaler=scaler,
logger=logger
)
# Average validation score
avg_val_score = np.nanmean(val_scores)
debug(f'Validation {args.metric} = {avg_val_score:.6f}')
writer.add_scalar(f'validation_{args.metric}', avg_val_score, n_iter)
debug(f'Validation loss = {val_loss:.6f}')
writer.add_scalar(f'validation_loss', val_loss, n_iter)
if args.show_individual_scores:
# Individual validation scores
for task_name, val_score in zip(args.task_names, val_scores):
debug(f'Validation {task_name} {args.metric} = {val_score:.6f}')
writer.add_scalar(f'validation_{task_name}_{args.metric}', val_score, n_iter)
# Save model checkpoint if improved validation score
if args.minimize_score and avg_val_score < best_score or \
not args.minimize_score and avg_val_score > best_score:
best_score, best_epoch = avg_val_score, epoch
save_checkpoint(os.path.join(save_dir, 'model.pt'), model, scaler, drug_scaler, cmpd_scaler, args)
# Evaluate on test set using model with best validation score
info(f'Model {model_idx} best validation {args.metric} = {best_score:.6f} on epoch {best_epoch}')
model = load_checkpoint(os.path.join(save_dir, 'model.pt'), cuda=args.cuda, logger=logger)
test_preds = predict(
model=model,
data=test_data,
batch_size=args.batch_size,
scaler=scaler
)
if args.save_preds:
val_preds = predict(model=model, data=val_data, batch_size=args.batch_size, scaler=scaler)
train_preds = predict(model=model, data=train_data, batch_size=args.batch_size, scaler=scaler)
save_predictions(save_dir, train_data, val_data, test_data, \
train_preds, val_preds, test_preds, args.task_names, scaler)
test_scores = evaluate_predictions(
preds=test_preds,
targets=test_targets,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
logger=logger
)
if len(test_preds) != 0:
sum_test_preds += np.array(test_preds)
# Average test score
avg_test_score = np.nanmean(test_scores)
info(f'Model {model_idx} test {args.metric} = {avg_test_score:.6f}')
writer.add_scalar(f'test_{args.metric}', avg_test_score, 0)
if args.show_individual_scores:
# Individual test scores
for task_name, test_score in zip(args.task_names, test_scores):
info(f'Model {model_idx} test {task_name} {args.metric} = {test_score:.6f}')
writer.add_scalar(f'test_{task_name}_{args.metric}', test_score, n_iter)
# Evaluate ensemble on test set
avg_test_preds = (sum_test_preds / args.ensemble_size).tolist()
ensemble_scores = evaluate_predictions(
preds=avg_test_preds,
targets=test_targets,
num_tasks=args.num_tasks,
metric_func=metric_func,
dataset_type=args.dataset_type,
logger=logger
)
# Average ensemble score
avg_ensemble_test_score = np.nanmean(ensemble_scores)
info(f'Ensemble test {args.metric} = {avg_ensemble_test_score:.6f}')
writer.add_scalar(f'ensemble_test_{args.metric}', avg_ensemble_test_score, 0)
# Individual ensemble scores
if args.show_individual_scores:
for task_name, ensemble_score in zip(args.task_names, ensemble_scores):
info(f'Ensemble test {task_name} {args.metric} = {ensemble_score:.6f}')
return ensemble_scores
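# Sketch of how a caller typically consumes the return value (hypothetical caller code;
# in chemprop-style pipelines a cross-validation wrapper aggregates these per-task
# ensemble scores across folds):
#   ensemble_scores = run_training(args, logger)
#   avg_score = np.nanmean(ensemble_scores)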
|
from functools import partial
import numpy as np
from skimage import img_as_float, img_as_uint
from skimage import color, data, filters
from skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
# Down-sample image for quicker testing.
COLOR_IMAGE = data.astronaut()[::5, ::6]
GRAY_IMAGE = data.camera()[::5, ::5]
SIGMA = 3
smooth = partial(filters.gaussian, sigma=SIGMA)
assert_allclose = partial(np.testing.assert_allclose, atol=1e-8)
@adapt_rgb(each_channel)
def edges_each(image):
return filters.sobel(image)
@adapt_rgb(each_channel)
def smooth_each(image, sigma):
return filters.gaussian(image, sigma)
@adapt_rgb(each_channel)
def mask_each(image, mask):
result = image.copy()
result[mask] = 0
return result
@adapt_rgb(hsv_value)
def edges_hsv(image):
return filters.sobel(image)
@adapt_rgb(hsv_value)
def smooth_hsv(image, sigma):
return filters.gaussian(image, sigma)
@adapt_rgb(hsv_value)
def edges_hsv_uint(image):
return img_as_uint(filters.sobel(image))
def test_gray_scale_image():
# We don't need to test both `hsv_value` and `each_channel` since
# `adapt_rgb` is handling gray-scale inputs.
assert_allclose(edges_each(GRAY_IMAGE), filters.sobel(GRAY_IMAGE))
def test_each_channel():
filtered = edges_each(COLOR_IMAGE)
for i, channel in enumerate(np.rollaxis(filtered, axis=-1)):
expected = img_as_float(filters.sobel(COLOR_IMAGE[:, :, i]))
assert_allclose(channel, expected)
def test_each_channel_with_filter_argument():
filtered = smooth_each(COLOR_IMAGE, SIGMA)
for i, channel in enumerate(np.rollaxis(filtered, axis=-1)):
assert_allclose(channel, smooth(COLOR_IMAGE[:, :, i]))
def test_each_channel_with_asymmetric_kernel():
mask = np.triu(np.ones(COLOR_IMAGE.shape[:2], dtype=np.bool_))
mask_each(COLOR_IMAGE, mask)
def test_hsv_value():
filtered = edges_hsv(COLOR_IMAGE)
value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
assert_allclose(color.rgb2hsv(filtered)[:, :, 2], filters.sobel(value))
def test_hsv_value_with_filter_argument():
filtered = smooth_hsv(COLOR_IMAGE, SIGMA)
value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
assert_allclose(color.rgb2hsv(filtered)[:, :, 2], smooth(value))
def test_hsv_value_with_non_float_output():
# Since `rgb2hsv` returns a float image and the filtered result is inserted
# back into the HSV image, we want to make sure there isn't a dtype mismatch.
filtered = edges_hsv_uint(COLOR_IMAGE)
filtered_value = color.rgb2hsv(filtered)[:, :, 2]
value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
# Reduce tolerance because of the dtype conversion.
assert_allclose(filtered_value, filters.sobel(value), rtol=1e-5, atol=1e-5)
|
""" Pooling-based Vision Transformer (PiT) in PyTorch
A PyTorch implement of Pooling-based Vision Transformers as described in
'Rethinking Spatial Dimensions of Vision Transformers' - https://arxiv.org/abs/2103.16302
This code was adapted from the original version at https://github.com/naver-ai/pit, original copyright below.
Modifications for timm by / Copyright 2020 Ross Wightman
"""
# PiT
# Copyright 2021-present NAVER Corp.
# Apache License v2.0
import math
import re
from functools import partial
from typing import Tuple
import torch
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import trunc_normal_, to_2tuple
from .registry import register_model
from .vision_transformer import Block
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.conv', 'classifier': 'head',
**kwargs
}
default_cfgs = {
# deit models (FB weights)
'pit_ti_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_730.pth'),
'pit_xs_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_781.pth'),
'pit_s_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_809.pth'),
'pit_b_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_820.pth'),
'pit_ti_distilled_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_ti_distill_746.pth',
classifier=('head', 'head_dist')),
'pit_xs_distilled_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_xs_distill_791.pth',
classifier=('head', 'head_dist')),
'pit_s_distilled_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_s_distill_819.pth',
classifier=('head', 'head_dist')),
'pit_b_distilled_224': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-pit-weights/pit_b_distill_840.pth',
classifier=('head', 'head_dist')),
}
class SequentialTuple(nn.Sequential):
""" This module exists to work around torchscript typing issues list -> list"""
def __init__(self, *args):
super(SequentialTuple, self).__init__(*args)
def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
for module in self:
x = module(x)
return x
class Transformer(nn.Module):
def __init__(
self, base_dim, depth, heads, mlp_ratio, pool=None, drop_rate=.0, attn_drop_rate=.0, drop_path_prob=None):
super(Transformer, self).__init__()
self.layers = nn.ModuleList([])
embed_dim = base_dim * heads
self.blocks = nn.Sequential(*[
Block(
dim=embed_dim,
num_heads=heads,
mlp_ratio=mlp_ratio,
qkv_bias=True,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=drop_path_prob[i],
norm_layer=partial(nn.LayerNorm, eps=1e-6)
)
for i in range(depth)])
self.pool = pool
def forward(self, x: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
x, cls_tokens = x
B, C, H, W = x.shape
token_length = cls_tokens.shape[1]
x = x.flatten(2).transpose(1, 2)
x = torch.cat((cls_tokens, x), dim=1)
x = self.blocks(x)
cls_tokens = x[:, :token_length]
x = x[:, token_length:]
x = x.transpose(1, 2).reshape(B, C, H, W)
if self.pool is not None:
x, cls_tokens = self.pool(x, cls_tokens)
return x, cls_tokens
class ConvHeadPooling(nn.Module):
def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'):
super(ConvHeadPooling, self).__init__()
self.conv = nn.Conv2d(
in_feature, out_feature, kernel_size=stride + 1, padding=stride // 2, stride=stride,
padding_mode=padding_mode, groups=in_feature)
self.fc = nn.Linear(in_feature, out_feature)
def forward(self, x, cls_token) -> Tuple[torch.Tensor, torch.Tensor]:
x = self.conv(x)
cls_token = self.fc(cls_token)
return x, cls_token
class ConvEmbedding(nn.Module):
def __init__(self, in_channels, out_channels, patch_size, stride, padding):
super(ConvEmbedding, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, kernel_size=patch_size, stride=stride, padding=padding, bias=True)
def forward(self, x):
x = self.conv(x)
return x
class PoolingVisionTransformer(nn.Module):
""" Pooling-based Vision Transformer
A PyTorch implement of 'Rethinking Spatial Dimensions of Vision Transformers'
- https://arxiv.org/abs/2103.16302
"""
def __init__(self, img_size, patch_size, stride, base_dims, depth, heads,
mlp_ratio, num_classes=1000, in_chans=3, distilled=False,
attn_drop_rate=.0, drop_rate=.0, drop_path_rate=.0):
super(PoolingVisionTransformer, self).__init__()
padding = 0
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
height = math.floor((img_size[0] + 2 * padding - patch_size[0]) / stride + 1)
width = math.floor((img_size[1] + 2 * padding - patch_size[1]) / stride + 1)
self.base_dims = base_dims
self.heads = heads
self.num_classes = num_classes
self.num_tokens = 2 if distilled else 1
self.patch_size = patch_size
self.pos_embed = nn.Parameter(torch.randn(1, base_dims[0] * heads[0], height, width))
self.patch_embed = ConvEmbedding(in_chans, base_dims[0] * heads[0], patch_size, stride, padding)
self.cls_token = nn.Parameter(torch.randn(1, self.num_tokens, base_dims[0] * heads[0]))
self.pos_drop = nn.Dropout(p=drop_rate)
transformers = []
# stochastic depth decay rule
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depth)).split(depth)]
for stage in range(len(depth)):
pool = None
if stage < len(heads) - 1:
pool = ConvHeadPooling(
base_dims[stage] * heads[stage], base_dims[stage + 1] * heads[stage + 1], stride=2)
transformers += [Transformer(
base_dims[stage], depth[stage], heads[stage], mlp_ratio, pool=pool,
drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_prob=dpr[stage])
]
self.transformers = SequentialTuple(*transformers)
self.norm = nn.LayerNorm(base_dims[-1] * heads[-1], eps=1e-6)
self.num_features = self.embed_dim = base_dims[-1] * heads[-1]
# Classifier head
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = None
if distilled:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
if self.head_dist is not None:
return self.head, self.head_dist
else:
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if self.head_dist is not None:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
x = self.pos_drop(x + self.pos_embed)
cls_tokens = self.cls_token.expand(x.shape[0], -1, -1)
x, cls_tokens = self.transformers((x, cls_tokens))
cls_tokens = self.norm(cls_tokens)
if self.head_dist is not None:
return cls_tokens[:, 0], cls_tokens[:, 1]
else:
return cls_tokens[:, 0]
def forward(self, x):
x = self.forward_features(x)
if self.head_dist is not None:
x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple
if self.training and not torch.jit.is_scripting():
return x, x_dist
else:
return (x + x_dist) / 2
else:
return self.head(x)
def checkpoint_filter_fn(state_dict, model):
""" preprocess checkpoints """
out_dict = {}
p_blocks = re.compile(r'pools\.(\d)\.')
for k, v in state_dict.items():
# FIXME need to update resize for PiT impl
# if k == 'pos_embed' and v.shape != model.pos_embed.shape:
# # To resize pos embedding when using model at different size from pretrained weights
# v = resize_pos_embed(v, model.pos_embed)
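        # The remap below rewrites e.g. 'pools.0.conv.weight' to 'transformers.0.pool.conv.weight'.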
k = p_blocks.sub(lambda exp: f'transformers.{int(exp.group(1))}.pool.', k)
out_dict[k] = v
return out_dict
def _create_pit(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(
PoolingVisionTransformer, variant, pretrained,
default_cfg=default_cfgs[variant],
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs)
return model
@register_model
def pit_b_224(pretrained, **kwargs):
model_kwargs = dict(
patch_size=14,
stride=7,
base_dims=[64, 64, 64],
depth=[3, 6, 4],
heads=[4, 8, 16],
mlp_ratio=4,
**kwargs
)
return _create_pit('pit_b_224', pretrained, **model_kwargs)
@register_model
def pit_s_224(pretrained, **kwargs):
model_kwargs = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[3, 6, 12],
mlp_ratio=4,
**kwargs
)
return _create_pit('pit_s_224', pretrained, **model_kwargs)
@register_model
def pit_xs_224(pretrained, **kwargs):
model_kwargs = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
**kwargs
)
return _create_pit('pit_xs_224', pretrained, **model_kwargs)
@register_model
def pit_ti_224(pretrained, **kwargs):
model_kwargs = dict(
patch_size=16,
stride=8,
base_dims=[32, 32, 32],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
**kwargs
)
return _create_pit('pit_ti_224', pretrained, **model_kwargs)
@register_model
def pit_b_distilled_224(pretrained, **kwargs):
model_kwargs = dict(
patch_size=14,
stride=7,
base_dims=[64, 64, 64],
depth=[3, 6, 4],
heads=[4, 8, 16],
mlp_ratio=4,
distilled=True,
**kwargs
)
return _create_pit('pit_b_distilled_224', pretrained, **model_kwargs)
@register_model
def pit_s_distilled_224(pretrained, **kwargs):
model_kwargs = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[3, 6, 12],
mlp_ratio=4,
distilled=True,
**kwargs
)
return _create_pit('pit_s_distilled_224', pretrained, **model_kwargs)
@register_model
def pit_xs_distilled_224(pretrained, **kwargs):
model_kwargs = dict(
patch_size=16,
stride=8,
base_dims=[48, 48, 48],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
distilled=True,
**kwargs
)
return _create_pit('pit_xs_distilled_224', pretrained, **model_kwargs)
@register_model
def pit_ti_distilled_224(pretrained, **kwargs):
model_kwargs = dict(
patch_size=16,
stride=8,
base_dims=[32, 32, 32],
depth=[2, 6, 4],
heads=[2, 4, 8],
mlp_ratio=4,
distilled=True,
**kwargs
)
return _create_pit('pit_ti_distilled_224', pretrained, **model_kwargs)
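# Minimal usage sketch (illustrative): build the small PiT configuration directly and run a
# dummy forward pass. Assumes torch and the helpers imported at the top of this file
# (Transformer, SequentialTuple, trunc_normal_, to_2tuple) are available; creating models via
# the registered factory functions above additionally needs default_cfgs/build_model_with_cfg.
if __name__ == '__main__':
    _model = PoolingVisionTransformer(
        img_size=224, patch_size=16, stride=8,
        base_dims=[48, 48, 48], depth=[2, 6, 4], heads=[3, 6, 12], mlp_ratio=4)
    _out = _model(torch.randn(1, 3, 224, 224))
    print(_out.shape)  # expected: torch.Size([1, 1000])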
|
from cloudshell.shell.core.driver_context import ResourceCommandContext, AutoLoadDetails, AutoLoadAttribute, \
AutoLoadResource
from collections import defaultdict
class LegacyUtils(object):
def __init__(self):
self._datamodel_clss_dict = self.__generate_datamodel_classes_dict()
def migrate_autoload_details(self, autoload_details, context):
model_name = context.resource.model
root_name = context.resource.name
root = self.__create_resource_from_datamodel(model_name, root_name)
attributes = self.__create_attributes_dict(autoload_details.attributes)
self.__attach_attributes_to_resource(attributes, '', root)
        self.__build_sub_resources_hierarchy(root, autoload_details.resources, attributes)
return root
def __create_resource_from_datamodel(self, model_name, res_name):
return self._datamodel_clss_dict[model_name](res_name)
def __create_attributes_dict(self, attributes_lst):
d = defaultdict(list)
for attribute in attributes_lst:
d[attribute.relative_address].append(attribute)
return d
    def __build_sub_resources_hierarchy(self, root, sub_resources, attributes):
d = defaultdict(list)
for resource in sub_resources:
splitted = resource.relative_address.split('/')
parent = '' if len(splitted) == 1 else resource.relative_address.rsplit('/', 1)[0]
rank = len(splitted)
d[rank].append((parent, resource))
self.__set_models_hierarchy_recursively(d, 1, root, '', attributes)
def __set_models_hierarchy_recursively(self, dict, rank, manipulated_resource, resource_relative_addr, attributes):
        if rank not in dict:  # no resources at this depth - stop the recursion
            return
for (parent, resource) in dict[rank]:
if parent == resource_relative_addr:
sub_resource = self.__create_resource_from_datamodel(
resource.model.replace(' ', ''),
resource.name)
self.__attach_attributes_to_resource(attributes, resource.relative_address, sub_resource)
manipulated_resource.add_sub_resource(
self.__slice_parent_from_relative_path(parent, resource.relative_address), sub_resource)
self.__set_models_hierarchy_recursively(
dict,
rank + 1,
sub_resource,
resource.relative_address,
attributes)
def __attach_attributes_to_resource(self, attributes, curr_relative_addr, resource):
for attribute in attributes[curr_relative_addr]:
setattr(resource, attribute.attribute_name.lower().replace(' ', '_'), attribute.attribute_value)
del attributes[curr_relative_addr]
def __slice_parent_from_relative_path(self, parent, relative_addr):
        if parent == '':
            return relative_addr
        return relative_addr[len(parent) + 1:]  # + 1 to also remove the separator
def __generate_datamodel_classes_dict(self):
return dict(self.__collect_generated_classes())
def __collect_generated_classes(self):
import sys, inspect
return inspect.getmembers(sys.modules[__name__], inspect.isclass)
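# Illustrative use of LegacyUtils (assumes a first-gen shell driver context and an
# AutoLoadDetails object produced by an Autoload command):
#   root_model = LegacyUtils().migrate_autoload_details(autoload_details, context)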
class AwsTfBackend(object):
def __init__(self, name):
"""
"""
self.attributes = {}
self.resources = {}
self._cloudshell_model_name = 'Aws Tf Backend'
self._name = name
def add_sub_resource(self, relative_path, sub_resource):
self.resources[relative_path] = sub_resource
@classmethod
def create_from_context(cls, context):
"""
        Creates an instance of AwsTfBackend from the given context
:param context: cloudshell.shell.core.driver_context.ResourceCommandContext
:type context: cloudshell.shell.core.driver_context.ResourceCommandContext
:return:
:rtype AwsTfBackend
"""
result = AwsTfBackend(name=context.resource.name)
for attr in context.resource.attributes:
result.attributes[attr] = context.resource.attributes[attr]
return result
def create_autoload_details(self, relative_path=''):
"""
:param relative_path:
:type relative_path: str
:return
"""
resources = [AutoLoadResource(model=self.resources[r].cloudshell_model_name,
name=self.resources[r].name,
relative_address=self._get_relative_path(r, relative_path))
for r in self.resources]
attributes = [AutoLoadAttribute(relative_path, a, self.attributes[a]) for a in self.attributes]
autoload_details = AutoLoadDetails(resources, attributes)
for r in self.resources:
curr_path = relative_path + '/' + r if relative_path else r
curr_auto_load_details = self.resources[r].create_autoload_details(curr_path)
autoload_details = self._merge_autoload_details(autoload_details, curr_auto_load_details)
return autoload_details
def _get_relative_path(self, child_path, parent_path):
"""
Combines relative path
        :param child_path: Path of a model within its parent model, e.g. 1
        :type child_path: str
        :param parent_path: Full path of the parent model, e.g. 1/1. Might be empty for the root model
:type parent_path: str
:return: Combined path
:rtype str
"""
return parent_path + '/' + child_path if parent_path else child_path
@staticmethod
def _merge_autoload_details(autoload_details1, autoload_details2):
"""
Merges two instances of AutoLoadDetails into the first one
:param autoload_details1:
:type autoload_details1: AutoLoadDetails
:param autoload_details2:
:type autoload_details2: AutoLoadDetails
:return:
:rtype AutoLoadDetails
"""
for attribute in autoload_details2.attributes:
autoload_details1.attributes.append(attribute)
for resource in autoload_details2.resources:
autoload_details1.resources.append(resource)
return autoload_details1
@property
def cloudshell_model_name(self):
"""
Returns the name of the Cloudshell model
:return:
"""
return 'AwsTfBackend'
@property
def bucket_name(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.Bucket Name'] if 'Aws Tf Backend.Bucket Name' in self.attributes else None
@bucket_name.setter
def bucket_name(self, value):
"""
The name of the bucket to be used in order to save the state file
:type value: str
"""
self.attributes['Aws Tf Backend.Bucket Name'] = value
@property
def region_name(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.Region Name'] if 'Aws Tf Backend.Region Name' in self.attributes else None
@region_name.setter
def region_name(self, value):
"""
The region in which the bucket resides
:type value: str
"""
self.attributes['Aws Tf Backend.Region Name'] = value
@property
def access_key(self):
"""
:rtype: string
"""
return self.attributes['Aws Tf Backend.Access Key'] if 'Aws Tf Backend.Access Key' in self.attributes else None
@access_key.setter
def access_key(self, value):
"""
AWS access key
:type value: string
"""
self.attributes['Aws Tf Backend.Access Key'] = value
@property
def secret_key(self):
"""
:rtype: string
"""
return self.attributes['Aws Tf Backend.Secret Key'] if 'Aws Tf Backend.Secret Key' in self.attributes else None
@secret_key.setter
def secret_key(self, value):
"""
AWS secret key
:type value: string
"""
self.attributes['Aws Tf Backend.Secret Key'] = value
@property
def cloud_provider(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.Cloud Provider'] if 'Aws Tf Backend.Cloud Provider' in self.attributes else None
@cloud_provider.setter
def cloud_provider(self, value):
"""
        If Access Key and Secret Key were not filled in, the keys from the cloud provider will be used.
:type value: str
"""
self.attributes['Aws Tf Backend.Cloud Provider'] = value
@property
def hide_address(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.hide_address'] if 'Aws Tf Backend.hide_address' in self.attributes else None
@hide_address.setter
def hide_address(self, value='true'):
"""
:type value: str
"""
self.attributes['Aws Tf Backend.hide_address'] = value
@property
def user(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.User'] if 'Aws Tf Backend.User' in self.attributes else None
@user.setter
def user(self, value):
"""
User with administrative privileges
:type value: str
"""
self.attributes['Aws Tf Backend.User'] = value
@property
def password(self):
"""
:rtype: string
"""
return self.attributes['Aws Tf Backend.Password'] if 'Aws Tf Backend.Password' in self.attributes else None
@password.setter
def password(self, value):
"""
:type value: string
"""
self.attributes['Aws Tf Backend.Password'] = value
@property
def enable_password(self):
"""
:rtype: string
"""
return self.attributes['Aws Tf Backend.Enable Password'] if 'Aws Tf Backend.Enable Password' in self.attributes else None
@enable_password.setter
def enable_password(self, value):
"""
        The enable password is required by some CLI protocols, such as Telnet, depending on the device configuration.
:type value: string
"""
self.attributes['Aws Tf Backend.Enable Password'] = value
@property
def power_management(self):
"""
:rtype: bool
"""
return self.attributes['Aws Tf Backend.Power Management'] if 'Aws Tf Backend.Power Management' in self.attributes else None
@power_management.setter
def power_management(self, value=True):
"""
Used by the power management orchestration, if enabled, to determine whether to automatically manage the device power status. Enabled by default.
:type value: bool
"""
self.attributes['Aws Tf Backend.Power Management'] = value
@property
def sessions_concurrency_limit(self):
"""
:rtype: float
"""
return self.attributes['Aws Tf Backend.Sessions Concurrency Limit'] if 'Aws Tf Backend.Sessions Concurrency Limit' in self.attributes else None
@sessions_concurrency_limit.setter
def sessions_concurrency_limit(self, value='1'):
"""
The maximum number of concurrent sessions that the driver will open to the device. Default is 1 (no concurrency).
:type value: float
"""
self.attributes['Aws Tf Backend.Sessions Concurrency Limit'] = value
@property
def snmp_read_community(self):
"""
:rtype: string
"""
return self.attributes['Aws Tf Backend.SNMP Read Community'] if 'Aws Tf Backend.SNMP Read Community' in self.attributes else None
@snmp_read_community.setter
def snmp_read_community(self, value):
"""
        The SNMP Read-Only Community String is like a password. It is sent along with each SNMP Get-Request and allows (or denies) access to the device.
:type value: string
"""
self.attributes['Aws Tf Backend.SNMP Read Community'] = value
@property
def snmp_write_community(self):
"""
:rtype: string
"""
return self.attributes['Aws Tf Backend.SNMP Write Community'] if 'Aws Tf Backend.SNMP Write Community' in self.attributes else None
@snmp_write_community.setter
def snmp_write_community(self, value):
"""
        The SNMP Write Community String is like a password. It is sent along with each SNMP Set-Request and allows (or denies) changing MIB values.
:type value: string
"""
self.attributes['Aws Tf Backend.SNMP Write Community'] = value
@property
def snmp_v3_user(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.SNMP V3 User'] if 'Aws Tf Backend.SNMP V3 User' in self.attributes else None
@snmp_v3_user.setter
def snmp_v3_user(self, value):
"""
Relevant only in case SNMP V3 is in use.
:type value: str
"""
self.attributes['Aws Tf Backend.SNMP V3 User'] = value
@property
def snmp_v3_password(self):
"""
:rtype: string
"""
return self.attributes['Aws Tf Backend.SNMP V3 Password'] if 'Aws Tf Backend.SNMP V3 Password' in self.attributes else None
@snmp_v3_password.setter
def snmp_v3_password(self, value):
"""
Relevant only in case SNMP V3 is in use.
:type value: string
"""
self.attributes['Aws Tf Backend.SNMP V3 Password'] = value
@property
def snmp_v3_private_key(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.SNMP V3 Private Key'] if 'Aws Tf Backend.SNMP V3 Private Key' in self.attributes else None
@snmp_v3_private_key.setter
def snmp_v3_private_key(self, value):
"""
Relevant only in case SNMP V3 is in use.
:type value: str
"""
self.attributes['Aws Tf Backend.SNMP V3 Private Key'] = value
@property
def snmp_v3_authentication_protocol(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.SNMP V3 Authentication Protocol'] if 'Aws Tf Backend.SNMP V3 Authentication Protocol' in self.attributes else None
@snmp_v3_authentication_protocol.setter
def snmp_v3_authentication_protocol(self, value='No Authentication Protocol'):
"""
Relevant only in case SNMP V3 is in use.
:type value: str
"""
self.attributes['Aws Tf Backend.SNMP V3 Authentication Protocol'] = value
@property
def snmp_v3_privacy_protocol(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.SNMP V3 Privacy Protocol'] if 'Aws Tf Backend.SNMP V3 Privacy Protocol' in self.attributes else None
@snmp_v3_privacy_protocol.setter
def snmp_v3_privacy_protocol(self, value='No Privacy Protocol'):
"""
Relevant only in case SNMP V3 is in use.
:type value: str
"""
self.attributes['Aws Tf Backend.SNMP V3 Privacy Protocol'] = value
@property
def snmp_version(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.SNMP Version'] if 'Aws Tf Backend.SNMP Version' in self.attributes else None
@snmp_version.setter
def snmp_version(self, value=''):
"""
The version of SNMP to use. Possible values are v1, v2c and v3.
:type value: str
"""
self.attributes['Aws Tf Backend.SNMP Version'] = value
@property
def enable_snmp(self):
"""
:rtype: bool
"""
return self.attributes['Aws Tf Backend.Enable SNMP'] if 'Aws Tf Backend.Enable SNMP' in self.attributes else None
@enable_snmp.setter
def enable_snmp(self, value=True):
"""
        If set to True and SNMP isn't enabled yet in the device, the Shell will automatically enable SNMP in the device when the Autoload command is called. SNMP must be enabled on the device for the Autoload command to run successfully. True by default.
:type value: bool
"""
self.attributes['Aws Tf Backend.Enable SNMP'] = value
@property
def disable_snmp(self):
"""
:rtype: bool
"""
return self.attributes['Aws Tf Backend.Disable SNMP'] if 'Aws Tf Backend.Disable SNMP' in self.attributes else None
@disable_snmp.setter
def disable_snmp(self, value=False):
"""
If set to True SNMP will be disabled automatically by the Shell after the Autoload command execution is completed. False by default.
:type value: bool
"""
self.attributes['Aws Tf Backend.Disable SNMP'] = value
@property
def console_server_ip_address(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.Console Server IP Address'] if 'Aws Tf Backend.Console Server IP Address' in self.attributes else None
@console_server_ip_address.setter
def console_server_ip_address(self, value):
"""
The IP address of the console server, in IPv4 format.
:type value: str
"""
self.attributes['Aws Tf Backend.Console Server IP Address'] = value
@property
def console_user(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.Console User'] if 'Aws Tf Backend.Console User' in self.attributes else None
@console_user.setter
def console_user(self, value):
"""
:type value: str
"""
self.attributes['Aws Tf Backend.Console User'] = value
@property
def console_port(self):
"""
:rtype: float
"""
return self.attributes['Aws Tf Backend.Console Port'] if 'Aws Tf Backend.Console Port' in self.attributes else None
@console_port.setter
def console_port(self, value):
"""
        The port on the console server, usually a TCP port, with which the device is associated.
:type value: float
"""
self.attributes['Aws Tf Backend.Console Port'] = value
@property
def console_password(self):
"""
:rtype: string
"""
return self.attributes['Aws Tf Backend.Console Password'] if 'Aws Tf Backend.Console Password' in self.attributes else None
@console_password.setter
def console_password(self, value):
"""
:type value: string
"""
self.attributes['Aws Tf Backend.Console Password'] = value
@property
def cli_connection_type(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.CLI Connection Type'] if 'Aws Tf Backend.CLI Connection Type' in self.attributes else None
@cli_connection_type.setter
def cli_connection_type(self, value='Auto'):
"""
The CLI connection type that will be used by the driver. Possible values are Auto, Console, SSH, Telnet and TCP. If Auto is selected the driver will choose the available connection type automatically. Default value is Auto.
:type value: str
"""
self.attributes['Aws Tf Backend.CLI Connection Type'] = value
@property
def cli_tcp_port(self):
"""
:rtype: float
"""
return self.attributes['Aws Tf Backend.CLI TCP Port'] if 'Aws Tf Backend.CLI TCP Port' in self.attributes else None
@cli_tcp_port.setter
def cli_tcp_port(self, value):
"""
        TCP Port to use for the CLI connection. If kept empty, a default CLI port will be used based on the chosen protocol, for example Telnet will use port 23.
:type value: float
"""
self.attributes['Aws Tf Backend.CLI TCP Port'] = value
@property
def backup_location(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.Backup Location'] if 'Aws Tf Backend.Backup Location' in self.attributes else None
@backup_location.setter
def backup_location(self, value):
"""
Used by the save/restore orchestration to determine where backups should be saved.
:type value: str
"""
self.attributes['Aws Tf Backend.Backup Location'] = value
@property
def backup_type(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.Backup Type'] if 'Aws Tf Backend.Backup Type' in self.attributes else None
@backup_type.setter
def backup_type(self, value='File System'):
"""
        Supported protocols for saving and restoring configuration and firmware files. Possible values are 'File System', 'FTP' and 'TFTP'. The default value is 'File System'.
:type value: str
"""
self.attributes['Aws Tf Backend.Backup Type'] = value
@property
def backup_user(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.Backup User'] if 'Aws Tf Backend.Backup User' in self.attributes else None
@backup_user.setter
def backup_user(self, value):
"""
Username for the storage server used for saving and restoring of configuration and firmware files.
:type value: str
"""
self.attributes['Aws Tf Backend.Backup User'] = value
@property
def backup_password(self):
"""
:rtype: string
"""
return self.attributes['Aws Tf Backend.Backup Password'] if 'Aws Tf Backend.Backup Password' in self.attributes else None
@backup_password.setter
def backup_password(self, value):
"""
Password for the storage server used for saving and restoring of configuration and firmware files.
:type value: string
"""
self.attributes['Aws Tf Backend.Backup Password'] = value
@property
def name(self):
"""
:rtype: str
"""
return self._name
@name.setter
def name(self, value):
"""
:type value: str
"""
self._name = value
@property
def cloudshell_model_name(self):
"""
:rtype: str
"""
return self._cloudshell_model_name
@cloudshell_model_name.setter
def cloudshell_model_name(self, value):
"""
:type value: str
"""
self._cloudshell_model_name = value
@property
def system_name(self):
"""
:rtype: str
"""
return self.attributes['CS_GenericResource.System Name'] if 'CS_GenericResource.System Name' in self.attributes else None
@system_name.setter
def system_name(self, value):
"""
        A unique identifier for the device, if one exists in the device terminal/OS.
:type value: str
"""
self.attributes['CS_GenericResource.System Name'] = value
@property
def vendor(self):
"""
:rtype: str
"""
return self.attributes['CS_GenericResource.Vendor'] if 'CS_GenericResource.Vendor' in self.attributes else None
@vendor.setter
def vendor(self, value=''):
"""
        The name of the device manufacturer.
:type value: str
"""
self.attributes['CS_GenericResource.Vendor'] = value
@property
def contact_name(self):
"""
:rtype: str
"""
return self.attributes['CS_GenericResource.Contact Name'] if 'CS_GenericResource.Contact Name' in self.attributes else None
@contact_name.setter
def contact_name(self, value):
"""
The name of a contact registered in the device.
:type value: str
"""
self.attributes['CS_GenericResource.Contact Name'] = value
@property
def location(self):
"""
:rtype: str
"""
return self.attributes['CS_GenericResource.Location'] if 'CS_GenericResource.Location' in self.attributes else None
@location.setter
def location(self, value=''):
"""
The device physical location identifier. For example Lab1/Floor2/Row5/Slot4.
:type value: str
"""
self.attributes['CS_GenericResource.Location'] = value
@property
def model(self):
"""
:rtype: str
"""
return self.attributes['CS_GenericResource.Model'] if 'CS_GenericResource.Model' in self.attributes else None
@model.setter
def model(self, value=''):
"""
The device model. This information is typically used for abstract resource filtering.
:type value: str
"""
self.attributes['CS_GenericResource.Model'] = value
@property
def model_name(self):
"""
:rtype: str
"""
return self.attributes['CS_GenericResource.Model Name'] if 'CS_GenericResource.Model Name' in self.attributes else None
@model_name.setter
def model_name(self, value=''):
"""
The catalog name of the device model. This attribute will be displayed in CloudShell instead of the CloudShell model.
:type value: str
"""
self.attributes['CS_GenericResource.Model Name'] = value
class ResourcePort(object):
def __init__(self, name):
"""
"""
self.attributes = {}
self.resources = {}
self._cloudshell_model_name = 'Aws Tf Backend.ResourcePort'
self._name = name
def add_sub_resource(self, relative_path, sub_resource):
self.resources[relative_path] = sub_resource
@classmethod
def create_from_context(cls, context):
"""
        Creates an instance of ResourcePort from the given context
:param context: cloudshell.shell.core.driver_context.ResourceCommandContext
:type context: cloudshell.shell.core.driver_context.ResourceCommandContext
:return:
:rtype ResourcePort
"""
result = ResourcePort(name=context.resource.name)
for attr in context.resource.attributes:
result.attributes[attr] = context.resource.attributes[attr]
return result
def create_autoload_details(self, relative_path=''):
"""
:param relative_path:
:type relative_path: str
:return
"""
resources = [AutoLoadResource(model=self.resources[r].cloudshell_model_name,
name=self.resources[r].name,
relative_address=self._get_relative_path(r, relative_path))
for r in self.resources]
attributes = [AutoLoadAttribute(relative_path, a, self.attributes[a]) for a in self.attributes]
autoload_details = AutoLoadDetails(resources, attributes)
for r in self.resources:
curr_path = relative_path + '/' + r if relative_path else r
curr_auto_load_details = self.resources[r].create_autoload_details(curr_path)
autoload_details = self._merge_autoload_details(autoload_details, curr_auto_load_details)
return autoload_details
def _get_relative_path(self, child_path, parent_path):
"""
Combines relative path
        :param child_path: Path of a model within its parent model, e.g. 1
        :type child_path: str
        :param parent_path: Full path of the parent model, e.g. 1/1. Might be empty for the root model
:type parent_path: str
:return: Combined path
:rtype str
"""
return parent_path + '/' + child_path if parent_path else child_path
@staticmethod
def _merge_autoload_details(autoload_details1, autoload_details2):
"""
Merges two instances of AutoLoadDetails into the first one
:param autoload_details1:
:type autoload_details1: AutoLoadDetails
:param autoload_details2:
:type autoload_details2: AutoLoadDetails
:return:
:rtype AutoLoadDetails
"""
for attribute in autoload_details2.attributes:
autoload_details1.attributes.append(attribute)
for resource in autoload_details2.resources:
autoload_details1.resources.append(resource)
return autoload_details1
@property
def cloudshell_model_name(self):
"""
Returns the name of the Cloudshell model
:return:
"""
return 'ResourcePort'
@property
def mac_address(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.ResourcePort.MAC Address'] if 'Aws Tf Backend.ResourcePort.MAC Address' in self.attributes else None
@mac_address.setter
def mac_address(self, value=''):
"""
:type value: str
"""
self.attributes['Aws Tf Backend.ResourcePort.MAC Address'] = value
@property
def ipv4_address(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.ResourcePort.IPv4 Address'] if 'Aws Tf Backend.ResourcePort.IPv4 Address' in self.attributes else None
@ipv4_address.setter
def ipv4_address(self, value):
"""
:type value: str
"""
self.attributes['Aws Tf Backend.ResourcePort.IPv4 Address'] = value
@property
def ipv6_address(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.ResourcePort.IPv6 Address'] if 'Aws Tf Backend.ResourcePort.IPv6 Address' in self.attributes else None
@ipv6_address.setter
def ipv6_address(self, value):
"""
:type value: str
"""
self.attributes['Aws Tf Backend.ResourcePort.IPv6 Address'] = value
@property
def port_speed(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.ResourcePort.Port Speed'] if 'Aws Tf Backend.ResourcePort.Port Speed' in self.attributes else None
@port_speed.setter
def port_speed(self, value):
"""
        The port speed (e.g. 10Gb/s, 40Gb/s, 100Mb/s)
:type value: str
"""
self.attributes['Aws Tf Backend.ResourcePort.Port Speed'] = value
@property
def name(self):
"""
:rtype: str
"""
return self._name
@name.setter
def name(self, value):
"""
:type value: str
"""
self._name = value
@property
def cloudshell_model_name(self):
"""
:rtype: str
"""
return self._cloudshell_model_name
@cloudshell_model_name.setter
def cloudshell_model_name(self, value):
"""
:type value: str
"""
self._cloudshell_model_name = value
@property
def model_name(self):
"""
:rtype: str
"""
return self.attributes['CS_Port.Model Name'] if 'CS_Port.Model Name' in self.attributes else None
@model_name.setter
def model_name(self, value=''):
"""
The catalog name of the device model. This attribute will be displayed in CloudShell instead of the CloudShell model.
:type value: str
"""
self.attributes['CS_Port.Model Name'] = value
class GenericPowerPort(object):
def __init__(self, name):
"""
"""
self.attributes = {}
self.resources = {}
self._cloudshell_model_name = 'Aws Tf Backend.GenericPowerPort'
self._name = name
def add_sub_resource(self, relative_path, sub_resource):
self.resources[relative_path] = sub_resource
@classmethod
def create_from_context(cls, context):
"""
        Creates an instance of GenericPowerPort from the given context
:param context: cloudshell.shell.core.driver_context.ResourceCommandContext
:type context: cloudshell.shell.core.driver_context.ResourceCommandContext
:return:
:rtype GenericPowerPort
"""
result = GenericPowerPort(name=context.resource.name)
for attr in context.resource.attributes:
result.attributes[attr] = context.resource.attributes[attr]
return result
def create_autoload_details(self, relative_path=''):
"""
:param relative_path:
:type relative_path: str
:return
"""
resources = [AutoLoadResource(model=self.resources[r].cloudshell_model_name,
name=self.resources[r].name,
relative_address=self._get_relative_path(r, relative_path))
for r in self.resources]
attributes = [AutoLoadAttribute(relative_path, a, self.attributes[a]) for a in self.attributes]
autoload_details = AutoLoadDetails(resources, attributes)
for r in self.resources:
curr_path = relative_path + '/' + r if relative_path else r
curr_auto_load_details = self.resources[r].create_autoload_details(curr_path)
autoload_details = self._merge_autoload_details(autoload_details, curr_auto_load_details)
return autoload_details
def _get_relative_path(self, child_path, parent_path):
"""
Combines relative path
        :param child_path: Path of a model within its parent model, e.g. 1
        :type child_path: str
        :param parent_path: Full path of the parent model, e.g. 1/1. Might be empty for the root model
:type parent_path: str
:return: Combined path
:rtype str
"""
return parent_path + '/' + child_path if parent_path else child_path
@staticmethod
def _merge_autoload_details(autoload_details1, autoload_details2):
"""
Merges two instances of AutoLoadDetails into the first one
:param autoload_details1:
:type autoload_details1: AutoLoadDetails
:param autoload_details2:
:type autoload_details2: AutoLoadDetails
:return:
:rtype AutoLoadDetails
"""
for attribute in autoload_details2.attributes:
autoload_details1.attributes.append(attribute)
for resource in autoload_details2.resources:
autoload_details1.resources.append(resource)
return autoload_details1
@property
def cloudshell_model_name(self):
"""
Returns the name of the Cloudshell model
:return:
"""
return 'GenericPowerPort'
@property
def model(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.GenericPowerPort.Model'] if 'Aws Tf Backend.GenericPowerPort.Model' in self.attributes else None
@model.setter
def model(self, value):
"""
The device model. This information is typically used for abstract resource filtering.
:type value: str
"""
self.attributes['Aws Tf Backend.GenericPowerPort.Model'] = value
@property
def serial_number(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.GenericPowerPort.Serial Number'] if 'Aws Tf Backend.GenericPowerPort.Serial Number' in self.attributes else None
@serial_number.setter
def serial_number(self, value):
"""
:type value: str
"""
self.attributes['Aws Tf Backend.GenericPowerPort.Serial Number'] = value
@property
def version(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.GenericPowerPort.Version'] if 'Aws Tf Backend.GenericPowerPort.Version' in self.attributes else None
@version.setter
def version(self, value):
"""
The firmware version of the resource.
:type value: str
"""
self.attributes['Aws Tf Backend.GenericPowerPort.Version'] = value
@property
def port_description(self):
"""
:rtype: str
"""
return self.attributes['Aws Tf Backend.GenericPowerPort.Port Description'] if 'Aws Tf Backend.GenericPowerPort.Port Description' in self.attributes else None
@port_description.setter
def port_description(self, value):
"""
The description of the port as configured in the device.
:type value: str
"""
self.attributes['Aws Tf Backend.GenericPowerPort.Port Description'] = value
@property
def name(self):
"""
:rtype: str
"""
return self._name
@name.setter
def name(self, value):
"""
:type value: str
"""
self._name = value
@property
def cloudshell_model_name(self):
"""
:rtype: str
"""
return self._cloudshell_model_name
@cloudshell_model_name.setter
def cloudshell_model_name(self, value):
"""
:type value: str
"""
self._cloudshell_model_name = value
@property
def model_name(self):
"""
:rtype: str
"""
return self.attributes['CS_PowerPort.Model Name'] if 'CS_PowerPort.Model Name' in self.attributes else None
@model_name.setter
def model_name(self, value=''):
"""
The catalog name of the device model. This attribute will be displayed in CloudShell instead of the CloudShell model.
:type value: str
"""
self.attributes['CS_PowerPort.Model Name'] = value
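# Minimal usage sketch (illustrative; the resource names and attribute values below are
# placeholders): build a root backend model with one port and produce the AutoLoadDetails
# structure that a CloudShell Autoload command is expected to return.
if __name__ == '__main__':
    _root = AwsTfBackend('tf-backend')
    _root.bucket_name = 'my-terraform-states'
    _port = ResourcePort('Port 1')
    _port.ipv4_address = '10.0.0.1'
    _root.add_sub_resource('P1', _port)
    _details = _root.create_autoload_details()
    print([r.relative_address for r in _details.resources])
    print([(a.relative_address, a.attribute_name) for a in _details.attributes])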
|
'''
This code is based on pytorch_ssd and RFBNet.
Details about the modules:
TUM - Thinned U-shaped Module
MLFPN - Multi-Level Feature Pyramid Network
M2Det - Multi-level Multi-scale single-shot object Detector
Author: Qijie Zhao (zhaoqijie@pku.edu.cn)
Finished Date: 01/17/2019
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init
import warnings
warnings.filterwarnings('ignore')
from ..registry import NECKS
from ..utils import ConvModule
class TUM(nn.Module):
def __init__(self, first_level=True, input_planes=128, is_smooth=True, side_channel=512, scales=6,
conv_cfg=None,
norm_cfg=None
):
super(TUM, self).__init__()
self.is_smooth = is_smooth
self.side_channel = side_channel
self.input_planes = input_planes
self.planes = 2 * self.input_planes
self.first_level = first_level
self.scales = scales
self.in1 = input_planes + side_channel if not first_level else input_planes
self.layers = nn.Sequential()
self.layers.add_module('{}'.format(len(self.layers)), ConvModule(self.in1, self.planes, 3, 2, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg))
for i in range(self.scales - 2):
if not i == self.scales - 3:
self.layers.add_module(
'{}'.format(len(self.layers)),
ConvModule(self.planes, self.planes, 3, 2, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg)
)
else:
self.layers.add_module(
'{}'.format(len(self.layers)),
ConvModule(self.planes, self.planes, 3, 1, 0,conv_cfg=conv_cfg,norm_cfg=norm_cfg)
)
self.toplayer = nn.Sequential(ConvModule(self.planes, self.planes, 1, 1, 0,conv_cfg=conv_cfg,norm_cfg=norm_cfg))
self.latlayer = nn.Sequential()
for i in range(self.scales - 2):
self.latlayer.add_module(
'{}'.format(len(self.latlayer)),
ConvModule(self.planes, self.planes, 3, 1, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg)
)
self.latlayer.add_module('{}'.format(len(self.latlayer)), ConvModule(self.in1, self.planes, 3, 1, 1,conv_cfg=conv_cfg,norm_cfg=norm_cfg))
if self.is_smooth:
smooth = list()
for i in range(self.scales - 1):
smooth.append(
ConvModule(self.planes, self.planes, 1, 1, 0,conv_cfg=conv_cfg,norm_cfg=norm_cfg)
)
self.smooth = nn.Sequential(*smooth)
def _upsample_add(self, x, y, fuse_type='interp'):
_, _, H, W = y.size()
if fuse_type == 'interp':
return F.interpolate(x, size=(H, W), mode='nearest') + y
else:
raise NotImplementedError
# return nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
def forward(self, x, y):
if not self.first_level:
x = torch.cat([x, y], 1)
conved_feat = [x]
for i in range(len(self.layers)):
x = self.layers[i](x)
conved_feat.append(x)
deconved_feat = [self.toplayer[0](conved_feat[-1])]
for i in range(len(self.latlayer)):
deconved_feat.append(
self._upsample_add(
deconved_feat[i], self.latlayer[i](conved_feat[len(self.layers) - 1 - i])
)
)
if self.is_smooth:
smoothed_feat = [deconved_feat[0]]
for i in range(len(self.smooth)):
smoothed_feat.append(
self.smooth[i](deconved_feat[i + 1])
)
return smoothed_feat
return deconved_feat
class SFAM(nn.Module):
def __init__(self, planes, num_levels, num_scales, compress_ratio=16):
super(SFAM, self).__init__()
self.planes = planes
self.num_levels = num_levels
self.num_scales = num_scales
self.compress_ratio = compress_ratio
        self.fc1 = nn.ModuleList([nn.Conv2d(self.planes * self.num_levels,
                                             self.planes * self.num_levels // self.compress_ratio,
                                             1, 1, 0)] * self.num_scales)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.ModuleList([nn.Conv2d(self.planes * self.num_levels // self.compress_ratio,
                                             self.planes * self.num_levels,
                                             1, 1, 0)] * self.num_scales)
self.sigmoid = nn.Sigmoid()
self.avgpool = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
attention_feat = []
for i, _mf in enumerate(x):
_tmp_f = self.avgpool(_mf)
_tmp_f = self.fc1[i](_tmp_f)
_tmp_f = self.relu(_tmp_f)
_tmp_f = self.fc2[i](_tmp_f)
_tmp_f = self.sigmoid(_tmp_f)
attention_feat.append(_mf * _tmp_f)
return attention_feat
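# Shape-level sanity check (illustrative): SFAM re-weights each concatenated multi-level
# feature map with a squeeze-and-excitation style channel attention, preserving shapes.
#   sfam = SFAM(planes=256, num_levels=8, num_scales=6)
#   feats = [torch.randn(1, 256 * 8, s, s) for s in (40, 20, 10, 5, 3, 1)]
#   outs = sfam(feats)  # each output tensor keeps the shape of its input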
@NECKS.register_module
class M2FPN(nn.Module):
def __init__(self,
num_levels = 8,
num_scales = 5,
sfam=False,
smooth=True,
                 in_channels=[512, 1024, 2048],
out_channels=256, conv_cfg=None,
norm_cfg=None):
'''
M2Det: Multi-level Multi-scale single-shot object Detector
'''
super(M2FPN,self).__init__()
self.planes = out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.num_levels = num_levels
self.num_scales = num_scales
self.sfam = sfam
self.smooth = smooth
self.in_channels = in_channels
        self.shallow_out = 256
        self.deep_out = 512
self.construct_modules()
def construct_modules(self,):
# construct tums
for i in range(self.num_levels):
if i == 0:
setattr(self,
'unet{}'.format(i+1),
TUM(first_level=True,
input_planes=self.planes//2,
is_smooth=self.smooth,
scales=self.num_scales,
side_channel=512)) #side channel isn't fixed.
else:
setattr(self,
'unet{}'.format(i+1),
TUM(first_level=False,
input_planes=self.planes//2,
is_smooth=self.smooth,
scales=self.num_scales,
side_channel=self.planes))
        self.reduce = ConvModule(self.in_channels[0], self.shallow_out, kernel_size=3, stride=1, padding=1)
        self.up_reduce_1 = ConvModule(self.in_channels[2], self.in_channels[1], kernel_size=1, stride=1)
        self.up_reduce_2 = ConvModule(self.in_channels[1], self.deep_out, kernel_size=1, stride=1)
        self.Norm = nn.BatchNorm2d(self.planes * self.num_levels)
self.leach = nn.ModuleList([ConvModule(
self.deep_out+self.shallow_out,
self.planes//2,
kernel_size=(1,1),stride=(1,1))]*self.num_levels)
# construct localization and recognition layers
conv_out = nn.ModuleList()
for i in range(self.num_scales):
conv_out.append(nn.Conv2d(self.planes*self.num_levels,
self.planes,
3, 1, 1))
self.conv_out = nn.ModuleList(conv_out)
# construct SFAM module
if self.sfam:
self.sfam_module = SFAM(self.planes, self.num_levels, self.num_scales, compress_ratio=16)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
def forward(self,x):
assert len(x)==len(self.in_channels)
# loc,conf = list(),list()
# base_feats = list()
# if 'vgg' in self.net_family:
# for k in range(len(self.base)):
# x = self.base[k](x)
# if k in self.base_out:
# base_feats.append(x)
# elif 'res' in self.net_family:
# base_feats = self.base(x, self.base_out)
up_feats = x[1] + F.interpolate(self.up_reduce_1(x[2]),scale_factor=2,mode='nearest')
base_feature = torch.cat(
(self.reduce(x[0]), F.interpolate(self.up_reduce_2(up_feats),scale_factor=2,mode='nearest')),1
)
# tum_outs is the multi-level multi-scale feature
tum_outs = [getattr(self, 'unet{}'.format(1))(self.leach[0](base_feature), 'none')]
for i in range(1,self.num_levels,1):
tum_outs.append(
getattr(self, 'unet{}'.format(i+1))(
self.leach[i](base_feature), tum_outs[i-1][-1]
)
)
# concat with same scales
sources = [torch.cat([_fx[i-1] for _fx in tum_outs],1) for i in range(self.num_scales, 0, -1)]
# forward_sfam
if self.sfam:
sources = self.sfam_module(sources)
sources[0] = self.Norm(sources[0])
output = []
for (x,cout) in zip(sources, self.conv_out):
output.append(cout(x))
return tuple(output)
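# Illustrative mmdetection-style config snippet for this neck (the channel list is an
# assumption and must match the backbone's output stages):
# neck=dict(
#     type='M2FPN',
#     num_levels=8,
#     num_scales=5,
#     in_channels=[512, 1024, 2048],
#     out_channels=256)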
|
from .orm import metadata, start_mappers
|
from scipy.io.wavfile import read
import numpy as np
import io
import csv
class TensorFlowPredictor:
def __init__(self, tensorflow_client, config):
self.client = tensorflow_client
self.class_names = self.class_names_from_csv("class_names.csv")
def class_names_from_csv(self, csv_file):
class_names = []
with open(csv_file, "r", newline="") as f:
for row in csv.reader(f, delimiter=","):
class_names.append(row[2])
return class_names
def predict(self, payload):
rate, data = read(io.BytesIO(payload))
assert rate == 16000
result = self.client.predict({"waveform": np.array(data, dtype=np.float32)})
        scores = np.array(result["output_0"]).reshape((-1, 521))
        # +1 skips class_names[0], which holds the CSV header row (assuming the file has one)
        predicted_class = self.class_names[scores.mean(axis=0).argmax() + 1]
return predicted_class
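# Local sketch (illustrative): in the serving runtime the real tensorflow_client is injected;
# a stub stands in for it here, and both 'sample.wav' (a 16 kHz WAV file) and class_names.csv
# are assumed to exist next to this file.
if __name__ == '__main__':
    class _StubClient:
        def predict(self, payload):
            # one fake frame of 521 class scores
            return {"output_0": np.zeros(521, dtype=np.float32).tolist()}
    with open('sample.wav', 'rb') as f:
        _payload = f.read()
    predictor = TensorFlowPredictor(_StubClient(), config=None)
    print(predictor.predict(_payload))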
|
#!/usr/bin/env python
#
## Licensed to the .NET Foundation under one or more agreements.
## The .NET Foundation licenses this file to you under the MIT license.
#
##
# Title: run.py
#
# Notes:
#
# Universal script to setup and run the xunit console runner. The script relies
# on run.proj and the bash and batch wrappers. All test excludes will also
# come from issues.targets. If there is a jit stress or gc stress exclude,
# please add GCStressIncompatible or JitOptimizationSensitive to the test's
# ilproj or csproj.
#
# The xunit runner currently relies on tests being built on the same host as the
# target platform. This requires all tests run on linux x64 to be built by the
# same platform and arch. If this is not done, the tests will run correctly;
# however, expect failures due to incorrect exclusions in the xunit
# wrappers setup at build time.
#
# Note that for linux targets the native components to the tests are still built
# by the product build. This requires all native components to be either copied
# into the Core_Root directory or the test's managed directory. The latter is
# prone to failure; however, copying into the Core_Root directory may create
# naming conflicts.
#
# If you are running tests on a different target than the host that built, the
# native tests components must be copied from:
# artifacts/obj/<OS>.<Arch>.<BuildType>/tests to the target. If the location is not
# standard please pass the -test_native_bin_location flag to the script.
#
# Use the instructions here:
# https://github.com/dotnet/runtime/blob/master/docs/workflow/testing/coreclr/windows-test-instructions.md
# https://github.com/dotnet/runtime/blob/master/docs/workflow/testing/coreclr/unix-test-instructions.md
#
################################################################################
################################################################################
import argparse
import datetime
import fnmatch
import json
import math
import os
import platform
import shutil
import subprocess
import sys
import tempfile
import time
import re
import string
import zipfile
import xml.etree.ElementTree
from collections import defaultdict
from sys import platform as _platform
# Version specific imports
if sys.version_info.major < 3:
import urllib
else:
import urllib.request
# Import coreclr_arguments.py from src\coreclr\scripts
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "coreclr", "scripts"))
from coreclr_arguments import *
################################################################################
# Argument Parser
################################################################################
description = ("""Universal script to setup and run the xunit console runner. The script relies
on run.proj and the bash and batch wrappers. All test excludes will also
come from issues.targets. If there is a jit stress or gc stress exclude,
please add GCStressIncompatible or JitOptimizationSensitive to the test's
ilproj or csproj.
The xunit runner currently relies on tests being built on the same host as the
target platform. This requires all tests run on linux x64 to be built by the
same platform and arch. If this is not done, the tests will run correctly;
however, expect failures due to incorrect exclusions in the xunit
wrappers setup at build time.
Note that for linux targets the native components to the tests are still built
by the product build. This requires all native components to be either copied
into the Core_Root directory or the test's managed directory. The latter is
prone to failure; however, copying into the Core_Root directory may create
naming conflicts.
If you are running tests on a different target than the host that built, the
native tests components must be copied from:
artifacts/obj/<OS>.<Arch>.<BuildType>/tests to the target. If the location is not
standard please pass the -test_native_bin_location flag to the script.""")
parser = argparse.ArgumentParser(description=description)
parser.add_argument("-os", dest="host_os", nargs='?', default=None)
parser.add_argument("-arch", dest="arch", nargs='?', default="x64")
parser.add_argument("-build_type", dest="build_type", nargs='?', default="Debug")
parser.add_argument("-test_location", dest="test_location", nargs="?", default=None)
parser.add_argument("-core_root", dest="core_root", nargs='?', default=None)
parser.add_argument("-product_location", dest="product_location", nargs='?', default=None)
parser.add_argument("-runtime_repo_location", dest="runtime_repo_location", default=os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
parser.add_argument("-test_env", dest="test_env", default=None)
parser.add_argument("-crossgen_altjit", dest="crossgen_altjit", default=None)
# Optional arguments which change execution.
# Rid is used only for restoring packages. This is an unspecified and undocumented
# environment variable that needs to be passed to build.proj. Do not use this
# unless you are attempting to target package restoration for another host/arch/os
parser.add_argument("-rid", dest="rid", nargs="?", default=None)
parser.add_argument("--il_link", dest="il_link", action="store_true", default=False)
parser.add_argument("--long_gc", dest="long_gc", action="store_true", default=False)
parser.add_argument("--gcsimulator", dest="gcsimulator", action="store_true", default=False)
parser.add_argument("--ilasmroundtrip", dest="ilasmroundtrip", action="store_true", default=False)
parser.add_argument("--run_crossgen_tests", dest="run_crossgen_tests", action="store_true", default=False)
parser.add_argument("--run_crossgen2_tests", dest="run_crossgen2_tests", action="store_true", default=False)
parser.add_argument("--large_version_bubble", dest="large_version_bubble", action="store_true", default=False)
parser.add_argument("--precompile_core_root", dest="precompile_core_root", action="store_true", default=False)
parser.add_argument("--skip_test_run", dest="skip_test_run", action="store_true", default=False, help="Does not run tests. Useful in conjunction with --precompile_core_root")
parser.add_argument("--sequential", dest="sequential", action="store_true", default=False)
parser.add_argument("--analyze_results_only", dest="analyze_results_only", action="store_true", default=False)
parser.add_argument("--verbose", dest="verbose", action="store_true", default=False)
parser.add_argument("--limited_core_dumps", dest="limited_core_dumps", action="store_true", default=False)
parser.add_argument("--run_in_context", dest="run_in_context", action="store_true", default=False)
# Only used on Unix
parser.add_argument("-test_native_bin_location", dest="test_native_bin_location", nargs='?', default=None)
################################################################################
# Globals
################################################################################
g_verbose = False
gc_stress = False
coredump_pattern = ""
file_name_cache = defaultdict(lambda: None)
################################################################################
# Classes
################################################################################
class TempFile:
def __init__(self, extension):
self.file = None
self.file_name = None
self.extension = extension
def __enter__(self):
self.file = tempfile.NamedTemporaryFile(delete=False, suffix=self.extension)
self.file_name = self.file.name
return self.file_name
def __exit__(self, exc_type, exc_val, exc_tb):
try:
os.remove(self.file_name)
except:
print("Error failed to delete: {}.".format(self.file_name))
class DebugEnv:
def __init__(self,
args,
env,
test):
""" Go through the failing tests and create repros for them
Args:
args
env : env for the repro
test ({}) : The test metadata
"""
self.unique_name = "%s_%s_%s_%s" % (test["name"],
args.host_os,
args.arch,
args.build_type)
self.args = args
self.env = env
self.test = test
self.test_location = test["test_path"]
self.__create_repro_wrapper__()
self.path = None
if self.args.host_os == "windows":
self.path = self.unique_name + ".cmd"
else:
self.path = self.unique_name + ".sh"
repro_location = os.path.join(self.args.artifacts_location, "repro", "%s.%s.%s" % (self.args.host_os, self.args.arch, self.args.build_type))
assert os.path.isdir(repro_location)
self.repro_location = repro_location
self.path = os.path.join(repro_location, self.path)
exe_location = os.path.splitext(self.test_location)[0] + ".exe"
if os.path.isfile(exe_location):
self.exe_location = exe_location
self.__add_configuration_to_launch_json__()
def __add_configuration_to_launch_json__(self):
""" Add to or create a launch.json with debug information for the test
Notes:
This will allow debugging using the cpp extension in vscode.
"""
repro_location = self.repro_location
assert os.path.isdir(repro_location)
vscode_dir = os.path.join(repro_location, ".vscode")
if not os.path.isdir(vscode_dir):
os.mkdir(vscode_dir)
assert os.path.isdir(vscode_dir)
launch_json_location = os.path.join(vscode_dir, "launch.json")
if not os.path.isfile(launch_json_location):
initial_json = {
"version": "0.2.0",
"configurations": []
}
json_str = json.dumps(initial_json,
indent=4,
separators=(',', ': '))
with open(launch_json_location, 'w') as file_handle:
file_handle.write(json_str)
launch_json = None
with open(launch_json_location) as file_handle:
launch_json = file_handle.read()
launch_json = json.loads(launch_json)
configurations = launch_json["configurations"]
dbg_type = "cppvsdbg" if self.host_os == "windows" else ""
env = {
"COMPlus_AssertOnNYI": "1",
"COMPlus_ContinueOnAssert": "0"
}
if self.env is not None:
# Convert self.env to a defaultdict
self.env = defaultdict(lambda: None, self.env)
for key, value in env.items():
self.env[key] = value
else:
self.env = env
environment = []
for key, value in self.env.items():
env = {
"name": key,
"value": value
}
environment.append(env)
configuration = defaultdict(lambda: None, {
"name": self.unique_name,
"type": dbg_type,
"request": "launch",
"program": self.args.corerun_path,
"args": [self.exe_location],
"stopAtEntry": False,
"cwd": os.path.join("${workspaceFolder}", "..", ".."),
"environment": environment,
"externalConsole": True
})
if self.args.build_type.lower() != "release":
symbol_path = os.path.join(self.args.core_root, "PDB")
configuration["symbolSearchPath"] = symbol_path
# Update configuration if it already exists.
config_exists = False
for index, config in enumerate(configurations):
if config["name"] == self.unique_name:
configurations[index] = configuration
config_exists = True
if not config_exists:
configurations.append(configuration)
json_str = json.dumps(launch_json,
indent=4,
separators=(',', ': '))
with open(launch_json_location, 'w') as file_handle:
file_handle.write(json_str)
def __create_repro_wrapper__(self):
""" Create the repro wrapper
"""
if self.args.host_os == "windows":
self.__create_batch_wrapper__()
else:
self.__create_bash_wrapper__()
def __create_batch_wrapper__(self):
""" Create a windows batch wrapper
"""
wrapper = \
"""@echo off
REM ============================================================================
REM Repro environment for %s
REM
REM Notes:
REM
REM This wrapper is automatically generated by run.py. It includes the
REM necessary environment to reproduce a failure that occurred while running
REM the tests.
REM
REM In order to change how this wrapper is generated, see
REM run.py:__create_batch_wrapper__(). Please note that it is possible
REM to recreate this file by running src/tests/run.py --analyze_results_only
REM with the appropriate environment set and the correct arch and build_type
REM passed.
REM
REM ============================================================================
REM Set Core_Root if it has not been already set.
if "%%CORE_ROOT%%"=="" set CORE_ROOT=%s
echo Core_Root is set to: "%%CORE_ROOT%%"
""" % (self.unique_name, self.args.core_root)
line_sep = os.linesep
if self.env is not None:
for key, value in self.env.items():
wrapper += "echo set %s=%s%s" % (key, value, line_sep)
wrapper += "set %s=%s%s" % (key, value, line_sep)
wrapper += "%s" % line_sep
wrapper += "echo call %s%s" % (self.test_location, line_sep)
wrapper += "call %s%s" % (self.test_location, line_sep)
self.wrapper = wrapper
def __create_bash_wrapper__(self):
""" Create a unix bash wrapper
"""
wrapper = \
"""
#============================================================================
# Repro environment for %s
#
# Notes:
#
# This wrapper is automatically generated by run.py. It includes the
# necessary environment to reproduce a failure that occurred while running
# the tests.
#
# In order to change how this wrapper is generated, see
# run.py:__create_bash_wrapper__(). Please note that it is possible
# to recreate this file by running src/tests/run.py --analyze_results_only
# with the appropriate environment set and the correct arch and build_type
# passed.
#
# ============================================================================
# Set Core_Root if it has not been already set.
if [ \"${CORE_ROOT}\" = \"\" ] || [ ! -z \"${CORE_ROOT}\" ]; then
export CORE_ROOT=%s
else
echo \"CORE_ROOT set to ${CORE_ROOT}\"
fi
""" % (self.unique_name, self.core_root)
line_sep = os.linesep
if self.env is not None:
for key, value in self.env.items():
wrapper += "echo export %s=%s%s" % (key, value, line_sep)
wrapper += "export %s=%s%s" % (key, value, line_sep)
wrapper += "%s" % line_sep
wrapper += "echo bash %s%s" % (self.test_location, line_sep)
wrapper += "bash %s%s" % (self.test_location, line_sep)
self.wrapper = wrapper
def write_repro(self):
""" Write out the wrapper
Notes:
This will check if the wrapper repros or not. If it does not repro
it will be put into an "unstable" folder under artifacts/repro.
Else it will just be written out.
"""
with open(self.path, 'w') as file_handle:
file_handle.write(self.wrapper)
################################################################################
# Helper Functions
################################################################################
def create_and_use_test_env(_os, env, func):
""" Create a test env based on the env passed
Args:
_os(str) : OS name
env(defaultdict(lambda: None)) : complus variables, key,value dict
func(lambda) : lambda to call, after creating the
: test_env
Notes:
Using the env passed, create a temporary file to use as the
test_env to be passed for run.cmd. Note that this only happens
        on Windows; until xunit is used on Unix, there is no managed code run
in run.sh.
"""
global gc_stress
ret_code = 0
complus_vars = defaultdict(lambda: None)
for key in env:
value = env[key]
if "complus" in key.lower() or "superpmi" in key.lower():
complus_vars[key] = value
if len(list(complus_vars.keys())) > 0:
print("Found COMPlus variables in the current environment")
print("")
contents = ""
# We can't use:
#
# with tempfile.NamedTemporaryFile() as test_env:
# ...
# return func(...)
#
# because on Windows Python locks the file, and trying to use it give you:
#
# The process cannot access the file because it is being used by another process.
#
# errors.
tempfile_suffix = ".bat" if _os == "windows" else ""
test_env = tempfile.NamedTemporaryFile(mode="w", suffix=tempfile_suffix, delete=False)
try:
file_header = None
if _os == "windows":
file_header = \
"""@REM Temporary test env for test run.
@echo on
"""
else:
file_header = \
"""# Temporary test env for test run.
"""
test_env.write(file_header)
contents += file_header
for key in complus_vars:
value = complus_vars[key]
command = None
if _os == "windows":
command = "set"
else:
command = "export"
if key.lower() == "complus_gcstress":
gc_stress = True
print("Unset %s" % key)
os.environ[key] = ""
# \n below gets converted to \r\n on Windows because the file is opened in text (not binary) mode
line = "%s %s=%s\n" % (command, key, value)
test_env.write(line)
contents += line
if _os == "windows":
file_suffix = \
"""@echo off
"""
test_env.write(file_suffix)
contents += file_suffix
test_env.close()
print("")
print("TestEnv: %s" % test_env.name)
print("")
print("Contents:")
print("")
print(contents)
print("")
ret_code = func(test_env.name)
finally:
os.remove(test_env.name)
else:
ret_code = func(None)
return ret_code
def get_environment(test_env=None):
""" Get all the COMPlus_* Environment variables
Notes:
All COMPlus variables need to be captured as a test_env script to avoid
influencing the test runner.
On Windows, os.environ keys (the environment variable names) are all upper case,
and map lookup is case-insensitive on the key.
"""
global gc_stress
complus_vars = defaultdict(lambda: "")
for key in os.environ:
if "complus" in key.lower():
complus_vars[key] = os.environ[key]
os.environ[key] = ''
elif "superpmi" in key.lower():
complus_vars[key] = os.environ[key]
os.environ[key] = ''
# Get the env from the test_env
if test_env is not None:
with open(test_env) as file_handle:
for item in file_handle.readlines():
key_split = item.split("=")
if len(key_split) == 1:
continue
key = key_split[0]
value = key_split[1]
key = key.split(" ")[-1]
value = value.strip()
try:
value = value.split(" ")[0]
except:
pass
complus_vars[key] = value
# Support case-insensitive lookups.
complus_vars[key.lower()] = value
if "complus_gcstress" in complus_vars:
gc_stress = True
return complus_vars
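# For illustration only (the variable names and values below are examples, not taken
# from this file): a test_env script parsed by get_environment() contains lines such as
#
#     set COMPlus_JitStress=2        (Windows)
#     export COMPlus_JitStress=2     (Unix)
#
# Each line is split on '=', the command word ('set'/'export') is stripped from the key,
# and the pair is stored under both the original and lower-cased key for case-insensitive
# lookup.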
def call_msbuild(args):
""" Call msbuild to run the tests built.
Args:
args
Notes:
At this point the environment should be setup correctly, including
the test_env, should it need to be passed.
"""
global g_verbose
common_msbuild_arguments = []
if args.sequential:
common_msbuild_arguments += ["/p:ParallelRun=none"]
if not os.path.isdir(args.logs_dir):
os.makedirs(args.logs_dir)
# Set up the directory for MSBuild debug logs.
msbuild_debug_logs_dir = os.path.join(args.logs_dir, "MsbuildDebugLogs")
if not os.path.isdir(msbuild_debug_logs_dir):
os.makedirs(msbuild_debug_logs_dir)
os.environ["MSBUILDDEBUGPATH"] = msbuild_debug_logs_dir
command = [args.dotnetcli_script_path,
"msbuild",
os.path.join(args.coreclr_tests_src_dir, "run.proj"),
"/p:Runtests=true",
"/clp:showcommandline"]
command += common_msbuild_arguments
if args.il_link:
command += ["/p:RunTestsViaIllink=true"]
if args.limited_core_dumps:
command += ["/p:LimitedCoreDumps=true"]
log_path = os.path.join(args.logs_dir, "TestRunResults_%s_%s_%s" % (args.host_os, args.arch, args.build_type))
build_log = log_path + ".log"
wrn_log = log_path + ".wrn"
err_log = log_path + ".err"
command += ["/fileloggerparameters:\"Verbosity=normal;LogFile=%s\"" % build_log,
"/fileloggerparameters1:\"WarningsOnly;LogFile=%s\"" % wrn_log,
"/fileloggerparameters2:\"ErrorsOnly;LogFile=%s\"" % err_log,
"/consoleloggerparameters:Summary"]
if g_verbose:
command += ["/verbosity:diag"]
command += ["/p:TargetOS=%s" % args.host_os,
"/p:TargetArchitecture=%s" % args.arch,
"/p:Configuration=%s" % args.build_type,
"/p:__LogsDir=%s" % args.logs_dir]
command += ["/bl:%s.binlog" % (log_path)]
print(" ".join(command))
sys.stdout.flush() # flush output before creating sub-process
proc = subprocess.Popen(command)
try:
proc.communicate()
except:
proc.kill()
sys.exit(1)
if args.limited_core_dumps:
inspect_and_delete_coredump_files(args.host_os, args.arch, args.test_location)
return proc.returncode
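# For illustration only (paths and values depend on the arguments passed and are
# abbreviated here), the assembled command resembles:
#
#     ./dotnet.sh msbuild <repo>/src/tests/run.proj /p:Runtests=true /clp:showcommandline \
#         /p:TargetOS=linux /p:TargetArchitecture=x64 /p:Configuration=Checked ...
#
# with file loggers for the full, warnings-only, and errors-only logs under args.logs_dir.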
def setup_coredump_generation(host_os):
""" Configures the environment so that the current process and any child
processes can generate coredumps.
Args:
host_os (String) : os
Notes:
This is only supported on OSX and Linux; it does nothing on Windows.
This will print a message if setting the rlimit fails but will otherwise
continue execution, as some systems will already be configured correctly
and it is not necessarily a failure to not collect coredumps.
"""
global coredump_pattern
if host_os == "OSX":
coredump_pattern = subprocess.check_output("sysctl -n kern.corefile", shell=True).rstrip()
elif host_os == "Linux":
with open("/proc/sys/kernel/core_pattern", "r") as f:
coredump_pattern = f.read().rstrip()
else:
print("CoreDump generation not enabled due to unsupported OS: %s" % host_os)
return
if isinstance(coredump_pattern, bytes):
print("Binary data found. Decoding.")
coredump_pattern = coredump_pattern.decode('ascii')
print("CoreDump Pattern: {}".format(coredump_pattern))
# resource is only available on Unix platforms
import resource
if coredump_pattern != "core" and coredump_pattern != "core.%P":
print("CoreDump generation not enabled due to unsupported coredump pattern: %s" % coredump_pattern)
return
else:
print("CoreDump pattern: %s" % coredump_pattern)
# We specify 'shell=True' as the command may otherwise fail (some systems will
# complain that the executable cannot be found in the current directory).
rlimit_core = subprocess.check_output("ulimit -c", shell=True).rstrip()
if rlimit_core != "unlimited":
try:
# This can fail on certain platforms. ARM64 in particular gives: "ValueError: not allowed to raise maximum limit"
resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
except:
print("Failed to enable CoreDump generation. rlimit_core: %s" % rlimit_core)
return
rlimit_core = subprocess.check_output("ulimit -c", shell=True).rstrip()
if rlimit_core != "unlimited":
print("Failed to enable CoreDump generation. rlimit_core: %s" % rlimit_core)
return
print("CoreDump generation enabled")
if host_os == "Linux" and os.path.isfile("/proc/self/coredump_filter"):
# Include memory in private and shared file-backed mappings in the dump.
# This ensures that we can see disassembly from our shared libraries when
# inspecting the contents of the dump. See 'man core' for details.
with open("/proc/self/coredump_filter", "w") as f:
f.write("0x3F")
def print_info_from_coredump_file(host_os, arch, coredump_name, executable_name):
""" Prints information from the specified coredump to the console
Args:
host_os (String) : os
arch (String) : architecture
coredump_name (String) : name of the coredump to print
executable_name (String) : name of the executable that generated the coredump
Notes:
This is only supported on OSX and Linux; it does nothing on Windows.
This defaults to lldb on OSX and gdb on Linux.
For both lldb and gdb, it backtraces all threads. For gdb, it also prints local
information for every frame. This option is not available as a built-in for lldb.
"""
if not os.path.isfile(executable_name):
print("Not printing coredump due to missing executable: %s" % executable_name)
return
if not os.path.isfile(coredump_name):
print("Not printing coredump due to missing coredump: %s" % coredump_name)
return
command = ""
if host_os == "OSX":
command = "lldb -c %s -b -o 'bt all' -o 'disassemble -b -p'" % coredump_name
elif host_os == "Linux":
command = "gdb --batch -ex \"thread apply all bt full\" -ex \"disassemble /r $pc\" -ex \"quit\" %s %s" % (executable_name, coredump_name)
else:
print("Not printing coredump due to unsupported OS: %s" % host_os)
return
print("Printing info from coredump: %s" % coredump_name)
proc_failed = False
try:
sys.stdout.flush() # flush output before creating sub-process
# We specify 'shell=True' as the command may otherwise fail (some systems will
# complain that the executable cannot be found in the current directory).
proc = subprocess.Popen(command, shell=True)
proc.communicate()
if proc.returncode != 0:
proc_failed = True
except:
proc_failed = True
if proc_failed:
print("Failed to print coredump: %s" % coredump_name)
def preserve_coredump_file(coredump_name, root_storage_location="/tmp/coredumps_coreclr"):
""" Copies the specified coredump to a new randomly named temporary directory under
root_storage_location to ensure it is accessible after the workspace is cleaned.
Args:
coredump_name (String) : name of the coredump to preserve
root_storage_location (String) : the directory under which to copy coredump_name
Notes:
root_storage_location defaults to a folder under /tmp to ensure that it is cleaned
up on next reboot (or after the OS configured time elapses for the folder).
"""
if not os.path.exists(root_storage_location):
os.mkdir(root_storage_location)
# This creates a temporary directory under `root_storage_location` to ensure it doesn't
# conflict with any coredumps from past runs.
storage_location = tempfile.mkdtemp('', '', root_storage_location)
# Only preserve the dump if the directory is empty. Otherwise, do nothing.
# This is a way to prevent us from storing/uploading too many dumps.
if os.path.isfile(coredump_name) and not os.listdir(storage_location):
print("Copying coredump file %s to %s" % (coredump_name, storage_location))
shutil.copy2(coredump_name, storage_location)
def inspect_and_delete_coredump_file(host_os, arch, coredump_name):
""" Prints information from the specified coredump and creates a backup of it
Args:
host_os (String) : os
arch (String) : architecture
coredump_name (String) : name of the coredump to print
"""
print_info_from_coredump_file(host_os, arch, coredump_name, "%s/corerun" % os.environ["CORE_ROOT"])
preserve_coredump_file(coredump_name)
os.remove(coredump_name)
def inspect_and_delete_coredump_files(host_os, arch, test_location):
""" Finds all coredumps under test_location, prints some basic information about them
to the console, and creates a backup of the dumps for further investigation
Args:
host_os (String) : os
arch (String) : architecture
test_location (String) : the folder under which to search for coredumps
"""
# This function prints some basic information from core files in the current
# directory and deletes them immediately.
# Depending on distro/configuration, the core files may either be named "core"
# or "core.<PID>" by default. We will read /proc/sys/kernel/core_uses_pid to
# determine which one it is.
# On OS X/macOS, we checked the kern.corefile value before enabling core dump
# generation, so we know it always includes the PID.
coredump_name_uses_pid=False
print("Looking for coredumps...")
if "%P" in coredump_pattern:
coredump_name_uses_pid=True
elif host_os == "Linux" and os.path.isfile("/proc/sys/kernel/core_uses_pid"):
with open("/proc/sys/kernel/core_uses_pid", "r") as f:
if f.read().rstrip() == "1":
coredump_name_uses_pid=True
filter_pattern = ""
regex_pattern = ""
matched_file_count = 0
if coredump_name_uses_pid:
filter_pattern = "core.*"
regex_pattern = "core.[0-9]+"
else:
filter_pattern = "core"
regex_pattern = "core"
for dir_path, dir_names, file_names in os.walk(test_location):
for file_name in fnmatch.filter(file_names, filter_pattern):
if re.match(regex_pattern, file_name):
print("Found coredump: %s in %s" % (file_name, dir_path))
matched_file_count += 1
inspect_and_delete_coredump_file(host_os, arch, os.path.join(dir_path, file_name))
print("Found %s coredumps." % matched_file_count)
def run_tests(args,
test_env_script_path=None):
""" Run the coreclr tests
Args:
args
test_env_script_path : Path to script to use to set the test environment, if any.
"""
if args.precompile_core_root:
precompile_core_root(args)
if args.skip_test_run:
return
# Set default per-test timeout to 15 minutes (in milliseconds).
per_test_timeout = 15*60*1000
# Setup the environment
if args.long_gc:
print("Running Long GC Tests, extending timeout to 20 minutes.")
per_test_timeout = 20*60*1000
print("Setting RunningLongGCTests=1")
os.environ["RunningLongGCTests"] = "1"
if args.gcsimulator:
print("Running GCSimulator tests, extending timeout to one hour.")
per_test_timeout = 60*60*1000
print("Setting RunningGCSimulatorTests=1")
os.environ["RunningGCSimulatorTests"] = "1"
if args.ilasmroundtrip:
print("Running ILasm round trip.")
print("Setting RunningIlasmRoundTrip=1")
os.environ["RunningIlasmRoundTrip"] = "1"
if args.run_crossgen_tests:
print("Running tests R2R")
print("Setting RunCrossGen=true")
os.environ["RunCrossGen"] = "true"
if args.run_crossgen2_tests:
print("Running tests R2R (Crossgen2)")
print("Setting RunCrossGen2=true")
os.environ["RunCrossGen2"] = "true"
if args.large_version_bubble:
print("Large Version Bubble enabled")
os.environ["LargeVersionBubble"] = "true"
if gc_stress:
print("Running GCStress, extending timeout to 120 minutes.")
per_test_timeout = 120*60*1000
if args.limited_core_dumps:
setup_coredump_generation(args.host_os)
if args.run_in_context:
print("Running test in an unloadable AssemblyLoadContext")
os.environ["CLRCustomTestLauncher"] = args.runincontext_script_path
os.environ["RunInUnloadableContext"] = "1";
per_test_timeout = 20*60*1000
# Set __TestTimeout environment variable, which is the per-test timeout in milliseconds.
# This is read by the test wrapper invoker, in src\coreclr\tests\src\Common\Coreclr.TestWrapper\CoreclrTestWrapperLib.cs.
print("Setting __TestTimeout=%s" % str(per_test_timeout))
os.environ["__TestTimeout"] = str(per_test_timeout)
# Set CORE_ROOT
print("Setting CORE_ROOT=%s" % args.core_root)
os.environ["CORE_ROOT"] = args.core_root
# Set __TestDotNetCmd so tests which need to run dotnet can use the repo-local script on dev boxes
os.environ["__TestDotNetCmd"] = args.dotnetcli_script_path
# Set test env script path if it is set.
if test_env_script_path is not None:
print("Setting __TestEnv=%s" % test_env_script_path)
os.environ["__TestEnv"] = test_env_script_path
#=====================================================================================================================================================
#
# This is a workaround needed to unblock our CI (in particular, Linux/arm and Linux/arm64 jobs) from the following failures appearing almost in every
# pull request (but hard to reproduce locally)
#
# System.IO.FileLoadException: Could not load file or assembly 'Exceptions.Finalization.XUnitWrapper, Version=0.0.0.0, Culture=neutral, PublicKeyToken=null'.
# An operation is not legal in the current state. (Exception from HRESULT: 0x80131509 (COR_E_INVALIDOPERATION))
#
# COR_E_INVALIDOPERATION comes from System.InvalidOperationException that is thrown during AssemblyLoadContext.ResolveUsingResolvingEvent
# when multiple threads attempt to modify an instance of Dictionary (managedAssemblyCache) during Xunit.DependencyContextAssemblyCache.LoadManagedDll call.
#
# In order to mitigate the failure we built our own xunit.console.dll with ConcurrentDictionary used for managedAssemblyCache and use this instead of
# the one pulled from NuGet. The exact code that got built can be found at the following fork of Xunit
# * https://github.com/echesakovMSFT/xunit/tree/UseConcurrentDictionaryInDependencyContextAssemblyCache
#
# The assembly was built using Microsoft Visual Studio v15.9.0-pre.4.0 Developer Command Prompt using the following commands
# 1) git clone https://github.com/echesakovMSFT/xunit.git --branch UseConcurrentDictionaryInDependencyContextAssemblyCache --single-branch
# 2) cd xunit
# 3) git submodule update --init
# 4) powershell .\build.ps1 -target packages -buildAssemblyVersion 2.4.1 -buildSemanticVersion 2.4.1-coreclr
#
# Then file "xunit\src\xunit.console\bin\Release\netcoreapp2.0\xunit.console.dll" was archived and uploaded to the clrjit blob storage.
#
# Ideally, this code should be removed when we find a more robust way of running Xunit tests.
#
# References:
# * https://github.com/dotnet/runtime/issues/11232
# * https://github.com/dotnet/runtime/issues/11320
# * https://github.com/xunit/xunit/issues/1842
# * https://github.com/xunit/xunit/pull/1846
#
#=====================================================================================================================================================
print("Download and overwrite xunit.console.dll in Core_Root")
urlretrieve = urllib.urlretrieve if sys.version_info.major < 3 else urllib.request.urlretrieve
zipfilename = os.path.join(tempfile.gettempdir(), "xunit.console.dll.zip")
url = r"https://clrjit.blob.core.windows.net/xunit-console/xunit.console.dll-v2.4.1.zip"
urlretrieve(url, zipfilename)
with zipfile.ZipFile(zipfilename,"r") as ziparch:
ziparch.extractall(os.path.join(args.core_root, "xunit"))
os.remove(zipfilename)
assert not os.path.isfile(zipfilename)
return call_msbuild(args)
def setup_args(args):
""" Setup the args based on the argparser obj
Args:
args(ArgParser): Parsed arguments
Notes:
If there is no core_root, or test location passed, create a default
location using the build type and the arch.
"""
requires_coreroot = args.host_os != "Browser" and args.host_os != "Android"
coreclr_setup_args = CoreclrArguments(args,
require_built_test_dir=True,
require_built_core_root=requires_coreroot,
require_built_product_dir=False)
normal_location = os.path.join(coreclr_setup_args.artifacts_location, "tests", "coreclr", "%s.%s.%s" % (coreclr_setup_args.host_os, coreclr_setup_args.arch, coreclr_setup_args.build_type))
# If we have supplied our own test location then we need to create a test location
# that the scripting will expect. As it is now, there is a dependency on the
# test location being under artifacts/tests/coreclr/<os>.<arch>.<build_type>
# Make sure that we are using the correct build_type. This is a test drop, it is possible
# that we are inferring the build type to be Debug incorrectly.
if coreclr_setup_args.build_type not in coreclr_setup_args.test_location:
# Remove punctuation
corrected_build_type = re.sub("[%s]" % string.punctuation, "", coreclr_setup_args.test_location.split(".")[-1])
coreclr_setup_args.verify(corrected_build_type,
"build_type",
coreclr_setup_args.check_build_type,
"Unsupported configuration: %s.\nSupported configurations: %s" % (corrected_build_type, ", ".join(coreclr_setup_args.valid_build_types)))
if coreclr_setup_args.test_location is not None and coreclr_setup_args.test_location != normal_location:
print("Error, msbuild currently expects tests in {} (got test_location {})".format(normal_location, coreclr_setup_args.test_location))
raise Exception("Error, msbuild currently expects tests in artifacts/tests/...")
coreclr_setup_args.verify(args,
"test_env",
lambda arg: True,
"Error setting test_env")
coreclr_setup_args.verify(args,
"analyze_results_only",
lambda arg: True,
"Error setting analyze_results_only")
coreclr_setup_args.verify(args,
"crossgen_altjit",
lambda arg: True,
"Error setting crossgen_altjit")
coreclr_setup_args.verify(args,
"rid",
lambda arg: True,
"Error setting rid")
coreclr_setup_args.verify(args,
"il_link",
lambda arg: True,
"Error setting il_link")
coreclr_setup_args.verify(args,
"long_gc",
lambda arg: True,
"Error setting long_gc")
coreclr_setup_args.verify(args,
"gcsimulator",
lambda arg: True,
"Error setting gcsimulator")
coreclr_setup_args.verify(args,
"ilasmroundtrip",
lambda arg: True,
"Error setting ilasmroundtrip")
coreclr_setup_args.verify(args,
"large_version_bubble",
lambda arg: True,
"Error setting large_version_bubble")
coreclr_setup_args.verify(args,
"run_crossgen_tests",
lambda arg: True,
"Error setting run_crossgen_tests")
coreclr_setup_args.verify(args,
"run_crossgen2_tests",
lambda unused: True,
"Error setting run_crossgen2_tests")
coreclr_setup_args.verify(args,
"precompile_core_root",
lambda arg: True,
"Error setting precompile_core_root")
coreclr_setup_args.verify(args,
"skip_test_run",
lambda arg: True,
"Error setting skip_test_run")
coreclr_setup_args.verify(args,
"sequential",
lambda arg: True,
"Error setting sequential")
coreclr_setup_args.verify(args,
"verbose",
lambda arg: True,
"Error setting verbose")
coreclr_setup_args.verify(args,
"limited_core_dumps",
lambda arg: True,
"Error setting limited_core_dumps")
coreclr_setup_args.verify(args,
"test_native_bin_location",
lambda arg: True,
"Error setting test_native_bin_location")
coreclr_setup_args.verify(args,
"run_in_context",
lambda arg: True,
"Error setting run_in_context")
is_same_os = False
is_same_arch = False
is_same_build_type = False
# We will write out build information into the test directory. This is used
# by run.py to determine whether we need to rebuild the test wrappers.
if os.path.isfile(os.path.join(coreclr_setup_args.test_location, "build_info.json")):
with open(os.path.join(coreclr_setup_args.test_location, "build_info.json")) as file_handle:
build_info = json.load(file_handle)
is_same_os = build_info["build_os"] == coreclr_setup_args.host_os
is_same_arch = build_info["build_arch"] == coreclr_setup_args.arch
is_same_build_type = build_info["build_type"] == coreclr_setup_args.build_type
if coreclr_setup_args.host_os != "windows" and not (is_same_os and is_same_arch and is_same_build_type):
test_native_bin_location = None
if args.test_native_bin_location is None:
test_native_bin_location = os.path.join(os.path.join(coreclr_setup_args.artifacts_location, "tests", "coreclr", "obj", "%s.%s.%s" % (coreclr_setup_args.host_os, coreclr_setup_args.arch, coreclr_setup_args.build_type)))
else:
test_native_bin_location = args.test_native_bin_location
coreclr_setup_args.verify(test_native_bin_location,
"test_native_bin_location",
lambda test_native_bin_location: os.path.isdir(test_native_bin_location),
"Error setting test_native_bin_location")
else:
setattr(coreclr_setup_args, "test_native_bin_location", None)
print("host_os : %s" % coreclr_setup_args.host_os)
print("arch : %s" % coreclr_setup_args.arch)
print("build_type : %s" % coreclr_setup_args.build_type)
print("runtime_repo_location : %s" % coreclr_setup_args.runtime_repo_location)
print("product_location : %s" % coreclr_setup_args.product_location)
print("core_root : %s" % coreclr_setup_args.core_root)
print("test_location : %s" % coreclr_setup_args.test_location)
print("test_native_bin_location : %s" % coreclr_setup_args.test_native_bin_location)
coreclr_setup_args.crossgen_path = os.path.join(coreclr_setup_args.core_root, "crossgen%s" % (".exe" if coreclr_setup_args.host_os == "windows" else ""))
coreclr_setup_args.corerun_path = os.path.join(coreclr_setup_args.core_root, "corerun%s" % (".exe" if coreclr_setup_args.host_os == "windows" else ""))
coreclr_setup_args.dotnetcli_script_path = os.path.join(coreclr_setup_args.runtime_repo_location, "dotnet%s" % (".cmd" if coreclr_setup_args.host_os == "windows" else ".sh"))
coreclr_setup_args.coreclr_tests_dir = os.path.join(coreclr_setup_args.coreclr_dir, "tests")
coreclr_setup_args.coreclr_tests_src_dir = os.path.join(coreclr_setup_args.runtime_repo_location, "src", "tests")
coreclr_setup_args.runincontext_script_path = os.path.join(coreclr_setup_args.coreclr_tests_src_dir, "Common", "scripts", "runincontext%s" % (".cmd" if coreclr_setup_args.host_os == "windows" else ".sh"))
coreclr_setup_args.logs_dir = os.path.join(coreclr_setup_args.artifacts_location, "log")
return coreclr_setup_args
def precompile_core_root(args):
""" Precompile all of the assemblies in the core_root directory
Args:
args
"""
skip_list = [
".*xunit.*",
".*api-ms-win-core.*",
".*api-ms-win.*",
".*System.Private.CoreLib.*"
]
unix_skip_list = [
".*mscorlib.*",
".*System.Runtime.WindowsRuntime.*",
".*System.Runtime.WindowsRuntime.UI.Xaml.*",
".*R2RDump.dll.*"
]
arm64_unix_skip_list = [
".*Microsoft.CodeAnalysis.VisualBasic.*",
".*System.Net.NameResolution.*",
".*System.Net.Sockets.*",
".*System.Net.Primitives.*"
]
if args.host_os != "windows":
skip_list += unix_skip_list
if args.arch == "arm64":
skip_list += arm64_unix_skip_list
assert os.path.isdir(args.test_location)
assert os.path.isdir(args.core_root)
def call_crossgen(file, env):
assert os.path.isfile(args.crossgen_path)
command = [args.crossgen_path, "/Platform_Assemblies_Paths", args.core_root, file]
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
proc.communicate()
return_code = proc.returncode
if return_code == -2146230517:
print("%s is not a managed assembly." % file)
return False
if return_code != 0:
print("Unable to precompile %s (%d)" % (file, return_code))
return False
print("Successfully precompiled %s" % file)
return True
print("Precompiling all assemblies in %s" % args.core_root)
print("")
env = os.environ.copy()
if args.crossgen_altjit is not None:
env["COMPlus_AltJit"]="*"
env["COMPlus_AltJitNgen"]="*"
env["COMPlus_AltJitName"]=args.crossgen_altjit
env["COMPlus_AltJitAssertOnNYI"]="1"
env["COMPlus_NoGuiOnAssert"]="1"
env["COMPlus_ContinueOnAssert"]="0"
dlls = [os.path.join(args.core_root, item) for item in os.listdir(args.core_root) if item.endswith("dll") and "mscorlib" not in item]
def in_skip_list(item):
found = False
for skip_re in skip_list:
if re.match(skip_re, item.lower()) is not None:
found = True
return found
dlls = [dll for dll in dlls if not in_skip_list(dll)]
for dll in dlls:
call_crossgen(dll, env)
print("")
if sys.version_info.major < 3:
def to_unicode(s):
return unicode(s, "utf-8")
else:
def to_unicode(s):
return s
def find_test_from_name(host_os, test_location, test_name):
""" Given a test's name return the location on disk
Args:
host_os (str) : os
test_location (str) :path to the coreclr tests
test_name (str) : Name of the test, all special characters will have
: been replaced with underscores.
Return:
test_path (str): Path of the test based on its name
"""
location = test_name
# Lambdas and helpers
is_file_or_dir = lambda path : os.path.isdir(path) or os.path.isfile(path)
def match_filename(test_path):
# Scan through the test directory looking for a similar
# file
global file_name_cache
if not os.path.isdir(os.path.dirname(test_path)):
pass
assert os.path.isdir(os.path.dirname(test_path))
size_of_largest_name_file = 0
dir_contents = file_name_cache[os.path.dirname(test_path)]
if dir_contents is None:
dir_contents = defaultdict(lambda: None)
for item in os.listdir(os.path.dirname(test_path)):
dir_contents[re.sub("[%s]" % string.punctuation, "_", item)] = item
file_name_cache[os.path.dirname(test_path)] = dir_contents
# It is possible there has already been a match
# therefore we need to remove the punctuation again.
basename_to_match = re.sub("[%s]" % string.punctuation, "_", os.path.basename(test_path))
if basename_to_match in dir_contents:
test_path = os.path.join(os.path.dirname(test_path), dir_contents[basename_to_match])
size_of_largest_name_file = len(max(dir_contents, key=len))
return test_path, size_of_largest_name_file
def dir_has_nested_substrings(test_path, test_item):
""" A directory has multiple paths where one path is a substring of another
"""
dir_contents = file_name_cache[os.path.dirname(test_path)]
if dir_contents is None:
dir_contents = defaultdict(lambda: None)
for item in os.listdir(os.path.dirname(test_path)):
dir_contents[re.sub("[%s]" % string.punctuation, "_", item)] = item
file_name_cache[os.path.dirname(test_path)] = dir_contents
test_item = re.sub("[%s]" % string.punctuation, "_", test_item)
count = 0
for item in dir_contents:
if test_item in item:
count += 1
return count > 1
# Find the test by searching down the directory list.
starting_path = test_location
loc_split = location.split("_")
append = False
for index, item in enumerate(loc_split):
if not append:
test_path = os.path.join(starting_path, item)
else:
append = False
test_path, size_of_largest_name_file = match_filename(starting_path + "_" + item)
if not is_file_or_dir(test_path):
append = True
# It is possible that there is another directory that is named
# without an underscore.
elif index + 1 < len(loc_split) and os.path.isdir(test_path):
next_test_path = os.path.join(test_path, loc_split[index + 1])
if not is_file_or_dir(next_test_path) or dir_has_nested_substrings(test_path, item):
added_path = test_path
for forward_index in range(index + 1, len(loc_split)):
added_path, size_of_largest_name_file = match_filename(added_path + "_" + loc_split[forward_index])
if is_file_or_dir(added_path):
append = True
break
elif size_of_largest_name_file < len(os.path.basename(added_path)):
break
starting_path = test_path
location = starting_path
if not os.path.isfile(location):
print("Warning: couldn't find test: %s" % test_name)
return None
assert(os.path.isfile(location))
return location
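# Illustrative example only (the test name and path below are hypothetical): a name such
# as 'JIT_Methodical_Boxing_boxunbox_value_sh' is consumed token by token; tokens are
# re-joined with '_' via match_filename() whenever a punctuation-mangled directory or
# file name requires it, eventually resolving to a path such as
# <test_location>/JIT/Methodical/Boxing/boxunbox/value.sh.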
def parse_test_results(args):
""" Parse the test results for test execution information
Args:
args : arguments
"""
log_path = os.path.join(args.logs_dir, "TestRunResults_%s_%s_%s" % (args.host_os, args.arch, args.build_type))
print("Parsing test results from (%s)" % log_path)
test_run_location = os.path.join(args.logs_dir, "testRun.xml")
if not os.path.isfile(test_run_location):
# Check if this is a casing issue
found = False
for item in os.listdir(args.logs_dir):
item_lower = item.lower()
if item_lower == "testrun.xml":
# Correct the name.
os.rename(os.path.join(args.logs_dir, item), test_run_location)
found = True
break
if not found:
print("Unable to find testRun.xml. This normally means the tests did not run.")
print("It could also mean there was a problem logging. Please run the tests again.")
return
print("Analyzing {}".format(test_run_location))
assemblies = xml.etree.ElementTree.parse(test_run_location).getroot()
tests = defaultdict(lambda: None)
for assembly in assemblies:
for collection in assembly:
if collection.tag == "errors" and collection.text != None:
# Something went wrong during running the tests.
print("Error running the tests, please run run.py again.")
sys.exit(1)
elif collection.tag != "errors":
test_name = None
for test in collection:
type = test.attrib["type"]
method = test.attrib["method"]
type = type.split("._")[0]
test_name = type + method
assert test_name != None
failed = collection.attrib["failed"]
skipped = collection.attrib["skipped"]
passed = collection.attrib["passed"]
time = float(collection.attrib["time"])
test_output = None
if failed == "1":
failure_info = collection[0][0]
test_output = failure_info.text
test_location_on_filesystem = find_test_from_name(args.host_os, args.test_location, test_name)
if test_location_on_filesystem is not None:
assert os.path.isfile(test_location_on_filesystem)
assert tests[test_name] == None
tests[test_name] = defaultdict(lambda: None, {
"name": test_name,
"test_path": test_location_on_filesystem,
"failed": failed,
"skipped": skipped,
"passed": passed,
"time": time,
"test_output": test_output
})
return tests
def print_summary(tests):
""" Print a summary of the test results
Args:
tests (defaultdict[String]: { }): The tests that were reported by
: xunit
"""
assert tests is not None
failed_tests = []
passed_tests = []
skipped_tests = []
for test in tests:
test = tests[test]
if test["failed"] == "1":
failed_tests.append(test)
elif test["passed"] == "1":
passed_tests.append(test)
else:
skipped_tests.append(test)
failed_tests.sort(key=lambda item: item["time"], reverse=True)
passed_tests.sort(key=lambda item: item["time"], reverse=True)
skipped_tests.sort(key=lambda item: item["time"], reverse=True)
def print_tests_helper(tests, stop_count):
for index, item in enumerate(tests):
time = item["time"]
unit = "seconds"
time_remainder = ""
second_unit = ""
saved_time = time
remainder_str = ""
# If it can be expressed in hours
if time > 60**2:
time = saved_time / (60**2)
time_remainder = saved_time % (60**2)
time_remainder /= 60
time_remainder = math.floor(time_remainder)
unit = "hours"
second_unit = "minutes"
remainder_str = " %s %s" % (int(time_remainder), second_unit)
elif time > 60 and time < 60**2:
time = saved_time / 60
time_remainder = saved_time % 60
time_remainder = math.floor(time_remainder)
unit = "minutes"
second_unit = "seconds"
remainder_str = " %s %s" % (int(time_remainder), second_unit)
print("%s (%d %s%s)" % (item["test_path"], time, unit, remainder_str))
if stop_count != None:
if index >= stop_count:
break
if len(failed_tests) > 0:
print("%d failed tests:" % len(failed_tests))
print("")
print_tests_helper(failed_tests, None)
# The following code is currently disabled, as it produces too much verbosity in a normal
# test run. It could be put under a switch, or else just enabled as needed when investigating
# test slowness.
#
# if len(passed_tests) > 50:
# print("")
# print("50 slowest passing tests:")
# print("")
# print_tests_helper(passed_tests, 50)
if len(failed_tests) > 0:
print("")
print("#################################################################")
print("Output of failing tests:")
print("")
for item in failed_tests:
print("[%s]: " % item["test_path"])
print("")
test_output = item["test_output"]
# XUnit results are captured as escaped characters.
#test_output = test_output.replace("\\r", "\r")
#test_output = test_output.replace("\\n", "\n")
#test_output = test_output.replace("/r", "\r")
#test_output = test_output.replace("/n", "\n")
# Replace CR/LF by just LF; Python "print", below, will map as necessary on the platform.
# If we don't do this, then Python on Windows will convert \r\n to \r\r\n on output.
test_output = test_output.replace("\r\n", "\n")
unicode_output = None
if sys.version_info < (3,0):
# Handle unicode characters in output in python2.*
try:
unicode_output = unicode(test_output, "utf-8")
except:
print("Error: failed to convert Unicode output")
else:
unicode_output = test_output
if unicode_output is not None:
print(unicode_output)
print("")
print("")
print("#################################################################")
print("End of output of failing tests")
print("#################################################################")
print("")
print("")
print("Total tests run : %d" % len(tests))
print("Total passing tests: %d" % len(passed_tests))
print("Total failed tests : %d" % len(failed_tests))
print("Total skipped tests: %d" % len(skipped_tests))
print("")
def create_repro(args, env, tests):
""" Go through the failing tests and create repros for them
Args:
args
env
tests (defaultdict[String]: { }): The tests that were reported by
: xunit
"""
assert tests is not None
failed_tests = [tests[item] for item in tests if tests[item]["failed"] == "1"]
if len(failed_tests) == 0:
return
repro_location = os.path.join(args.artifacts_location, "repro", "%s.%s.%s" % (args.host_os, args.arch, args.build_type))
if os.path.isdir(repro_location):
shutil.rmtree(repro_location)
print("")
print("Creating repro files at: %s" % repro_location)
os.makedirs(repro_location)
assert os.path.isdir(repro_location)
# Now that the repro_location exists under <runtime>/artifacts/repro
# create wrappers which will simply run the test with the correct environment
for test in failed_tests:
debug_env = DebugEnv(args, env, test)
debug_env.write_repro()
print("Repro files written.")
################################################################################
# Main
################################################################################
def main(args):
global g_verbose
g_verbose = args.verbose
ret_code = 0
args = setup_args(args)
env = get_environment(test_env=args.test_env)
if not args.analyze_results_only:
if args.test_env is not None:
ret_code = run_tests(args, args.test_env)
else:
ret_code = create_and_use_test_env(args.host_os,
env,
lambda test_env_script_path: run_tests(args, test_env_script_path))
print("Test run finished.")
if not args.skip_test_run:
tests = parse_test_results(args)
if tests is not None:
print_summary(tests)
create_repro(args, env, tests)
return ret_code
################################################################################
# __main__
################################################################################
if __name__ == "__main__":
args = parser.parse_args()
sys.exit(main(args))
|
# -*- coding: utf-8 -*-
"""
sphinx.websupport
~~~~~~~~~~~~~~~~~
Base Module for web support functions.
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import warnings
from sphinx.deprecation import RemovedInSphinx20Warning
try:
from sphinxcontrib.websupport import WebSupport # NOQA
from sphinxcontrib.websupport import errors # NOQA
from sphinxcontrib.websupport.search import BaseSearch, SEARCH_ADAPTERS # NOQA
from sphinxcontrib.websupport.storage import StorageBackend # NOQA
warnings.warn('sphinx.websupport module is now provided as sphinxcontrib-websupport. '
'sphinx.websupport will be removed in Sphinx-2.0. Please use it instead.',
RemovedInSphinx20Warning)
except ImportError:
warnings.warn('Since Sphinx-1.6, the sphinx.websupport module has been separated into the '
'sphinxcontrib-websupport package. Please add it to your dependency list.')
|
#!/usr/bin/env python
from __future__ import print_function
import roslib
roslib.load_manifest('faa_computer_admin')
import rospy
import argparse
import subprocess
from faa_utilities import FindData
from faa_data_processing import TrackingDataProcessor
from faa_data_processing import VideoDataProcessor
from faa_data_processing import FigureDataProcessor
def process(path_list,overwrite,tracking,video,figure):
"""
Process tracking, video, and figure data found under the given path.
"""
fd = FindData(overwrite)
path = path_list[0]
if not tracking and not video and not figure:
tracking = True
video = True
figure = True
if figure and not tracking:
tracking = True
if tracking:
contains_data = fd.path_contains_tracking_data(path)
if not contains_data and overwrite:
print("Path does not exist or does not contain tracking data.")
elif not contains_data and not overwrite:
print("Path does not exist, does not contain tracking data, or tracking data has already been processed.")
print("Try -o overwrite switch to reprocess data.")
tdp = TrackingDataProcessor(overwrite)
tdp.find_and_process_data(path)
if video:
contains_data = fd.path_contains_video_data(path)
if not contains_data and overwrite:
print("Path does not exist or does not contain video data.")
elif not contains_data and not overwrite:
print("Path does not exist, does not contain video data, or video data has already been processed.")
print("Try -o overwrite switch to reprocess data.")
vdp = VideoDataProcessor(overwrite)
vdp.find_and_process_data(path)
if figure:
contains_data = fd.path_contains_figure_data(path)
if not contains_data and overwrite:
print("Path does not exist or does not contain figure data.")
elif not contains_data and not overwrite:
print("Path does not exist, does not contain figure data, or figure data has already been processed.")
print("Try -o overwrite switch to reprocess data.")
fdp = FigureDataProcessor(overwrite)
fdp.find_and_process_data(path)
def calibrate():
"""
Starts the camera calibration application
"""
_roslaunch('calibrate_camera.launch')
def experiment(no_usb_hardware):
"""
Starts the experiment mode application
"""
if no_usb_hardware:
print("Running in test mode with no USB hardware attached.")
options = {'hardware': "false"}
else:
print("USB hardware attached!")
options = {'hardware': "true"}
reuse_background_images = False
if reuse_background_images:
print("Reusing background images.")
options['reusing_bg_images'] = "true"
else:
options['reusing_bg_images'] = "false"
_roslaunch('experiment.launch',options)
def manual(no_usb_hardware):
"""
Starts the manual mode application
"""
if no_usb_hardware:
print("Running in test mode with no USB hardware attached.")
options = {'hardware': "false"}
else:
print("USB hardware attached!")
options = {'hardware': "true"}
_roslaunch('manual_control.launch',options)
def save_images():
"""
Starts the save images application
"""
options = {}
_roslaunch('save_images.launch',options)
def _roslaunch(launch_file,options={}):
"""
Runs a roslaunch file.
"""
try:
call_list = ['roslaunch', 'faa_launch', launch_file]
for option in options:
call_list.append(option + ":=" + str(options[option]))
subprocess.call(call_list)
except KeyboardInterrupt:
return
def cli():
parser = argparse.ArgumentParser(description='Fly Alcohol Assay Control')
# parser.add_argument('-t','--test',action="store_true",
# help='launch test.launch')
parser.add_argument('-c','--calibrate',action="store_true",
help='launch calibrate_camera.launch')
parser.add_argument('-e','--experiment',action="store_true", default=True,
help='launch experiment.launch')
parser.add_argument('-n','--no-usb-hardware',action="store_true",
help='set testing mode when USB hardware is not attached')
parser.add_argument('-p','--process',dest='process_path',nargs=1,default=False,
help='process data within directory')
parser.add_argument('-o','--overwrite',action="store_true", default=False,
help='reprocess data and overwrite processed data files')
parser.add_argument('-t','--tracking',action="store_true", default=False,
help='process tracking data')
parser.add_argument('-v','--video',action="store_true", default=False,
help='process videos')
parser.add_argument('-f','--figure',action="store_true", default=False,
help='process data figure')
# parser.add_argument('-r','--reuse-background-images',action="store_true",
# help='reuse background images when testing')
parser.add_argument('-m','--manual',action="store_true",
help='launch manual control GUI')
args = parser.parse_args()
if args.process_path:
process(args.process_path,args.overwrite,args.tracking,args.video,args.figure)
elif args.calibrate:
calibrate()
# elif args.save_images:
# save_images()
elif args.manual:
manual(args.no_usb_hardware)
elif args.experiment:
experiment(args.no_usb_hardware)
|
from .init import init
from .draw_timeseries_graph import draw_timeseries_graph
from .draw_pie_charts import draw_pie_chart
from .draw_top_list import draw_top_list
__all__ = ["init", "draw_timeseries_graph", "draw_pie_chart", "draw_top_list"]
|
# -*- coding: utf-8 -*-
"""read_csv
Read the different csv files
Created on Mon Oct 11 21:30:00 2021 @author: Dan Kotlyar
Last updated on Mon Oct 11 21:45:00 2021 @author: Dan Kotlyar
"""
import numpy as np
import pandas as pd
def ReadCsv(csvFile):
data = pd.read_csv(csvFile)
ID = np.array(data['ZAID'], dtype=int)
xsTypes = np.array(data['MT'], dtype=int)
xsVals = np.array(data["XS [barns]"], dtype=float)
N0 = np.array(data["N0 [atoms/b-cm]"], dtype=float)
fullID = np.unique(ID) # unique isotopes
nIsotopes = len(fullID)
# 1-ID, 2-ND, 3-cap, 4-fiss, 5-(n,alpha)
xsTable = np.zeros((nIsotopes, 5))
xsTable[:, 0] = fullID
# obtain all the cross section types
numMTs = np.array([102, 18, 107])
for idx, numMT in enumerate(numMTs):
vals, idxFull, idx0 =\
np.intersect1d(fullID, ID[xsTypes == numMT], assume_unique=False,
return_indices=True)
if idx == 0:
xsTable[idxFull, 1] = N0[xsTypes == numMT][idx0]
xsTable[idxFull, idx+2] = xsVals[xsTypes == numMT][idx0]
idxFields = {"ID": 0, "N0": 1, "sig_c": 2, "sig_alpha": 3, "sig_f": 4}
return xsTable, idxFields
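# Illustrative usage (the file contents and values below are examples only; the column
# names match those read above):
#
#     ZAID,MT,XS [barns],N0 [atoms/b-cm]
#     922350,102,98.7,0.001
#     922350,18,585.1,0.001
#
#     xsTable, idxFields = ReadCsv("bootstrap.csv")
#     sigF_U235 = xsTable[xsTable[:, idxFields["ID"]] == 922350, idxFields["sig_f"]]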
|
__all__ = [
'DesignerLinkLabel', 'RecentItem',
'RecentFilesBox', 'DesignerStartPage']
from utils.utils import get_designer, get_fs_encoding
from kivy.properties import ObjectProperty, StringProperty
from kivy.uix.scrollview import ScrollView
from kivy.uix.boxlayout import BoxLayout
from kivy.lang.builder import Builder
from kivy.uix.button import Button
import webbrowser
Builder.load_string("""
#: import theme_atlas utils.utils.theme_atlas
<DesignerButtonFit@DesignerButton>
size_hint_x: None
width: (self.texture_size[0]+sp(32))
<DesignerStartPage>:
btn_open: btn_open
btn_new: btn_new
recent_files_box: recent_files_box
orientation: 'vertical'
padding: (0, 0, 0, dp(20))
Label:
text: 'Kivy Designer'
font_size: '26pt'
size_hint_y: None
height: '40pt'
Label:
markup: True
text: '[i]Innovative User Interfaces, Desktop, and Mobile Development Made Easy.[/i]'
font_size: pt(12)
halign: 'center'
size_hint_y: None
height: '15pt'
GridLayout:
cols: 2
size_hint: None, None
height: self.minimum_height
width: self.minimum_width
pos_hint: {'center_x': 0.5}
padding: (0, pt(15), 0, 0)
spacing: '4sp'
DesignerButtonFit:
id: btn_open
text: 'Open Project'
on_release: root.dispatch('on_open_down')
DesignerButtonFit:
id: btn_new
text: 'New Project'
on_release: root.dispatch('on_new_down')
Label:
text: 'Getting Started'
font_size: '16pt'
bold: True
size_hint_y: None
height: '30pt'
GridLayout:
kivy_label: kivy_label
cols: 2
size_hint: None, None
height: self.minimum_height
width: '450dp'
pos_hint: {'center_x': 0.5}
row_force_default: True
row_default_height: '40sp'
spacing: '4sp'
padding: '16sp', '0sp'
DesignerLinkLabel:
id: kivy_label
text: ' Kivy'
link: 'http://kivy.org'
DesignerLinkLabel:
text: ' Kivy Designer Help'
on_release: root.dispatch('on_help')
DesignerLinkLabel:
id: kivy_label
text: ' Kivy Documentation'
link: 'http://kivy.org/docs'
DesignerLinkLabel:
text: ' Kivy Designer Documentation'
link: 'http://kivy-designer.readthedocs.org/'
Label:
text: 'Recent Projects'
font_size: '16pt'
bold: True
size_hint_y: None
height: '30pt'
RecentFilesBox:
id: recent_files_box
pos_hint: {'center_x': 0.5}
size_hint_x: None
width: '600dp'
canvas.before:
Color:
rgba: (1, 1, 1, 0.05)
Rectangle:
pos: self.pos
size: self.size
<DesignerLinkLabel>:
color: (0, 0, 1, 1)
background_normal: theme_atlas('action_item')
background_disabled_normal: theme_atlas('action_item_disabled')
text_size: self.width, None
<RecentFilesBox>:
grid: grid
cols: 1
padding: '2sp'
size_hint_x: None
bar_width: '10dp'
scroll_type: ['bars', 'content']
GridLayout:
id: grid
cols: 1
size_hint_y: None
height: '1dp'
<RecentItem>:
orientation: 'vertical'
size_hint: 1, None
height: '40dp'
on_touch_down: if self.collide_point(*args[1].pos): root.dispatch('on_press')
canvas.after:
Color:
rgb: (0.2, 0.2, 0.2)
Rectangle:
pos: ((self.x+dp(25)), self.y)
size: ((self.width-dp(50)), dp(1))
Label:
text: root.path
text_size: self.size
valign: 'middle'
shorten: True
padding_x: '20dp'
""")
class DesignerLinkLabel(Button):
'''DesignerLinkLabel displays an HTTP link and opens it in a browser window
when clicked.
'''
link = StringProperty(None)
'''Contains the http link to be opened.
:data:`link` is a :class:`~kivy.properties.StringProperty`
'''
def on_release(self, *args):
'''Default event handler for 'on_release' event.
'''
if self.link:
webbrowser.open(self.link)
class RecentItem(BoxLayout):
path = StringProperty('')
'''Contains the application path
:data:`path` is a :class:`~kivy.properties.StringProperty`
'''
__events__ = ('on_press', )
def on_press(self, *args):
'''Item pressed
'''
class RecentFilesBox(ScrollView):
'''Container consisting of buttons, with their names specifying
the recent files.
'''
grid = ObjectProperty(None)
'''The grid layout consisting of all buttons.
This property is an instance of :class:`~kivy.uix.gridlayout`
:data:`grid` is a :class:`~kivy.properties.ObjectProperty`
'''
def __init__(self, **kwargs):
super(RecentFilesBox, self).__init__(**kwargs)
def add_recent(self, list_files):
'''To add buttons representing Recent Files.
:param list_files: array of paths
'''
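# The grid has an explicit height (size_hint_y: None in the kv rule above), so its height
# is grown manually for every item added; otherwise the ScrollView would have nothing to
# scroll.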
for p in list_files:
if isinstance(p, bytes):
p = p.decode(get_fs_encoding())
recent_item = RecentItem(path=p)
self.grid.add_widget(recent_item)
recent_item.bind(on_press=self.btn_release)
self.grid.height += recent_item.height
self.grid.height = max(self.grid.height, self.height)
def btn_release(self, instance):
'''Event handler for the 'on_press' event of a RecentItem.
'''
d = get_designer()
d.ids.toll_bar_top._perform_open(instance.path)
class DesignerStartPage(BoxLayout):
recent_files_box = ObjectProperty(None)
'''This property is an instance
of :class:`~designer.components.start_page.RecentFilesBox`
:data:`recent_files_box` is a :class:`~kivy.properties.ObjectProperty`
'''
__events__ = ('on_open_down', 'on_new_down', 'on_help')
def on_open_down(self, *args):
'''Default Event Handler for 'on_open_down'
'''
pass
def on_new_down(self, *args):
'''Default Event Handler for 'on_new_down'
'''
pass
def on_help(self, *args):
'''Default Event Handler for 'on_help'
'''
pass
|
from tethys_sdk.testing import TethysTestCase
from tethys_compute.models.dask.dask_scheduler import Scheduler, DaskScheduler
from tethys_compute.models.dask.dask_job import DaskJob
from django.contrib.auth.models import User
import dask
from unittest import mock
import time
@dask.delayed
def inc(x):
return x + 1
@dask.delayed
def double(x):
return x + 2
@dask.delayed
def add(x, y):
time.sleep(2)
return x + y
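# These dask.delayed helpers give the tests small, real task graphs to submit through the
# DaskJob client/future plumbing; the Dask Client itself is mocked in the individual tests,
# so no scheduler needs to be running.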
class DaskJobTest(TethysTestCase):
def set_up(self):
self.user = User.objects.create_user('tethys_super', 'user@example.com', 'pass')
self.scheduler = DaskScheduler(
name='test_dask_scheduler',
host='127.0.0.1:8000',
timeout=10,
heartbeat_interval=5,
dashboard='test_dashboard',
)
self.scheduler.save()
def tear_down(self):
self.scheduler.delete()
@mock.patch('tethys_compute.models.dask.dask_job.Client')
def test_client_prop_with_invalid_scheduler(self, mock_client):
mock_client.return_value = 'test_client'
djob = DaskJob(name='test_dj', user=self.user, key='test_key', label='label', scheduler=None)
# Execute
ret = djob.client
# Check result
self.assertEqual('test_client', ret)
mock_client.assert_called()
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
def test_client_prop_with_valid_scheduler(self, mock_client):
mock_client.return_value = 'test_client'
dask_scheduler = Scheduler.objects.get_subclass(name='test_dask_scheduler')
djob = DaskJob(name='test_dj', user=self.user, key='test_key', label='label', scheduler=dask_scheduler)
# Execute
ret = djob.client
# Check result
self.assertEqual('test_client', ret)
mock_client.assert_called_with(address='127.0.0.1:8000', heartbeat_interval=5, timeout=10)
@mock.patch('tethys_compute.models.dask.dask_job.Client')
def test_client_no_scheduler_prop(self, mock_client):
mock_client.return_value = 'test_default_client'
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label')
# Execute
ret = djob.client
# Check result
self.assertEqual('test_default_client', ret)
mock_client.assert_called_with()
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
@mock.patch('tethys_compute.models.dask.dask_job.Future')
def test_future_prop(self, mock_future, mock_client):
mock_client_ret = mock.MagicMock()
mock_client.return_value = mock_client_ret
mock_client_ret.submit.return_value = mock.MagicMock(key='test_key')
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# Get Scheduler Client from DaskJob using client property
client = djob.client
# Use this Client to run the inc function with a future handler
future = client.submit(inc, 1)
# Get the key from future handler and assign it to DaskJob key to keep track of this inc function
djob.key = future.key
# Use DaskJob future property to get back the inc function
ret = djob.future
# Check result
mock_future.assert_called_with(key='test_key', client=mock_client_ret)
self.assertEqual(mock_future(), ret)
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
def test_future_prop_no_key(self, mock_client):
mock_client_ret = mock.MagicMock()
mock_client.return_value = mock_client_ret
mock_client_ret.submit.return_value = mock.MagicMock(key='test_key')
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# Get Scheduler Client from DaskJob using client property
client = djob.client
# Use this Client to run inc function with a future handler
client.submit(inc, 1)
# Use DaskJob future property to get back the inc function
ret = djob.future
# Check result
self.assertIsNone(ret)
@mock.patch('tethys_compute.models.dask.dask_job.log')
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
@mock.patch('tethys_compute.models.dask.dask_job.Future')
def test_future_prop_exception(self, mock_future, mock_client, mock_log):
mock_client_ret = mock.MagicMock()
mock_client.return_value = mock_client_ret
mock_client_ret.submit.return_value = mock.MagicMock(key='test_key')
mock_future.side_effect = Exception('exception in creating future')
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# Get Scheduler Client from DaskJob using client property
client = djob.client
# Use this Client to run inc function with a future handler
future = client.submit(inc, 1)
# Get the key from future handler and assign it to DaskJob key to keep track of this inc function
djob.key = future.key
# Use DaskJob future property to get back the inc function
ret = djob.future
# Check result
self.assertIsNone(ret)
mock_log.exception.assert_called_with('Dask Future Init Error')
@mock.patch('tethys_compute.models.dask.dask_job.fire_and_forget')
@mock.patch('django.db.models.base.Model.save')
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
def test_execute_delayed(self, mock_client, mock_save, mock_ff):
mock_client_ret = mock.MagicMock()
mock_client.return_value = mock_client_ret
mock_future = mock.MagicMock(key='test_key')
mock_client_ret.compute.return_value = mock_future
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# Delayed option
delayed = dask.delayed(inc)(1)
# _Execute
djob._execute(delayed)
# Check result
mock_client_ret.compute.assert_called_with(delayed)
self.assertEqual('test_key', djob.key)
mock_save.assert_called()
mock_ff.assert_called_with(mock_future)
@mock.patch('tethys_compute.models.dask.dask_job.isinstance')
@mock.patch('tethys_compute.models.dask.dask_job.fire_and_forget')
@mock.patch('django.db.models.base.Model.save')
@mock.patch('tethys_compute.models.dask.dask_scheduler.Client')
def test_execute_future(self, mock_client, mock_save, mock_ff, mock_isinstance):
mock_client.return_value = mock.MagicMock()
mock_isinstance.side_effect = [True, False]
# Create DaskJob
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler
)
# get client from DaskJob
client = djob.client
# Future option
future = client.submit(inc, 2)
# _Execute
djob._execute(future)
# Check result
self.assertEqual(future.key, djob.key)
mock_save.assert_called()
mock_ff.assert_called_with(future)
def test_execute_not_future_delayed(self):
# Create DaskJob
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler
)
# _Execute
self.assertRaises(ValueError, djob._execute, 1)
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
@mock.patch('django.db.models.base.Model.save')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
def test_update_status(self, mock_future, mock_save, mock_client):
mock_future.status = 'finished'
# Create DaskJob
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler
)
# call the function
djob._update_status()
# check the results
mock_client.close.assert_called()
mock_save.assert_called()
def test_update_status_with_no_future(self):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# check the results
self.assertIsNone(djob._update_status())
@mock.patch('tethys_compute.models.dask.dask_job.log')
@mock.patch('django.db.models.base.Model.save')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
def test_update_status_exception(self, mock_future, mock_save, mock_log):
# Invalid status key
mock_future.status = 'foo'
# Create DaskJob
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler
)
# call the function
djob._update_status()
# check the results
mock_log.error.assert_called_with('Unknown Dask Status: "foo"')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._release_pr_lock')
def test_process_result_with_failed_lock(self, mock_re_lock, mock_apl):
mock_apl.return_value = False
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler,
_process_results_function='test_function')
# call the function
self.assertIsNone(djob._process_results())
# check the result
mock_re_lock.assert_not_called()
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future',
new_callable=mock.PropertyMock(return_value=None))
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
def test_process_result_no_future(self, mock_apl, _):
mock_apl.return_value = True
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler,
_process_results_function='test_function')
# call the function
self.assertIsNone(djob._process_results())
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future',
new_callable=mock.PropertyMock())
def test_process_result_forget(self, _, mock_client):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler, forget=True)
# call the function
ret = djob._process_results()
# check the result
mock_client.close.assert_called()
self.assertIsNone(ret)
@mock.patch('tethys_compute.models.tethys_job.TethysFunctionExtractor')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._release_pr_lock')
def test_process_result_with_result_function(self, mock_re_lock, mock_apl, mock_client, mock_future, mock_tfe):
fake_key = 'sum_faef'
mock_function_extractor = mock.MagicMock()
mock_function = mock.MagicMock(return_value='foo')
mock_function_extractor.valid = True
mock_function_extractor.function = mock_function
mock_tfe.return_value = mock_function_extractor
mock_apl.return_value = True
# Create DaskJob
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler,
_process_results_function='test_function'
)
djob.key = fake_key
# call the function
djob._process_results()
# check the result
mock_client.close.assert_called()
mock_client.gather.assert_called_with(mock_future)
mock_function.assert_called_with(mock_client.gather())
mock_client.set_metadata.assert_called_with(fake_key, False)
self.assertEqual('', djob.key)
mock_re_lock.assert_called()
@mock.patch('tethys_compute.models.tethys_job.TethysFunctionExtractor')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._release_pr_lock')
@mock.patch('tethys_compute.models.dask.dask_job.log')
def test_process_result_with_client_gather_exception(self, mock_logger, mock_re_lock, mock_apl, mock_client,
mock_future, mock_tfe):
mock_function_extractor = mock.MagicMock()
mock_function = mock.MagicMock(return_value='foo')
mock_function_extractor.valid = True
mock_function_extractor.function = mock_function
mock_tfe.return_value = mock_function_extractor
mock_apl.return_value = True
gather_exception = Exception('Fake exception')
mock_client.gather.side_effect = gather_exception
# Create DaskJob
djob = DaskJob(
name='test_dj',
user=self.user,
label='label',
scheduler=self.scheduler,
_process_results_function='test_function'
)
# call the function
djob._process_results()
# check the result
mock_client.gather.assert_called_with(mock_future)
mock_logger.warning.assert_called()
mock_function.assert_called_with(gather_exception)
mock_re_lock.assert_called()
@mock.patch('django.db.models.base.Model.save')
@mock.patch('tethys_compute.models.dask.dask_job.log')
@mock.patch('tethys_compute.models.tethys_job.TethysFunctionExtractor')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.client')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._acquire_pr_lock')
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob._release_pr_lock')
def test_process_result_with_result_function_with_exception(self, mock_re_lock, mock_apl, mock_client,
mock_future, mock_tfe, mock_log, mock_save):
mock_function_extractor = mock.MagicMock()
mock_function = mock.MagicMock()
mock_function.side_effect = Exception
mock_function_extractor.valid = True
mock_function_extractor.function = mock_function
mock_tfe.return_value = mock_function_extractor
mock_apl.return_value = True
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler,
_process_results_function='test_function')
# call the function
djob._process_results()
# check the result
mock_log.exception.assert_called_with('Process Results Function Error')
self.assertEqual('ERR', djob._status)
mock_save.assert_called()
mock_re_lock.assert_called()
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
def test_stop(self, mock_future):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# call the stop function
djob.stop()
# Check result
mock_future.cancel.assert_called()
def test_pause(self):
djob = DaskJob(name='test_dj', user=self.user, key='test_key', label='label', scheduler=self.scheduler)
# Execute and check result
self.assertRaises(NotImplementedError, djob.pause)
def test_resume(self):
djob = DaskJob(name='test_dj', user=self.user, key='test_key', label='label', scheduler=self.scheduler)
# Execute and check result
self.assertRaises(NotImplementedError, djob.resume)
def test_result(self):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# the result is assigned as a string because it is serialized to a string when saved to the database
djob.result = 'serialized_results'
# call the function
ret = djob.result
# Check result
self.assertEqual('serialized_results', ret)
def test_result_none(self):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
djob.result = None
# call the function
ret = djob.result
# Check result
self.assertIsNone(ret)
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
def test_done(self, mock_future):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# call the done function
ret = djob.done()
# Check result
mock_future.done.assert_called()
self.assertEqual(mock_future.done(), ret)
def test_done_with_no_future(self):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# Check result
self.assertIsNone(djob.done())
def test_update_status_interval_prop(self):
from datetime import timedelta
# Create DaskJob
djob = DaskJob(name='test_daskjob', user=self.user, label='label')
djob.save()
ret = DaskJob.objects.get(name='test_daskjob').update_status_interval
# Check result
self.assertIsInstance(ret, timedelta)
self.assertEqual(timedelta(0, 0), ret)
djob.delete()
@mock.patch('tethys_compute.models.dask.dask_job.DaskJob.future')
def test_retry(self, mock_future):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# call the done function
djob.retry()
# Check result
mock_future.retry.assert_called()
def test_retry_no_future(self):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
# call the done function
self.assertIsNone(djob.retry())
@mock.patch('tethys_compute.models.dask.dask_job.log')
def test_fail_acquire_pr_lock(self, mock_log):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
djob.extended_properties['processing_results'] = True
self.assertFalse(djob._acquire_pr_lock())
mock_log.warning.assert_called_with('Unable to aquire lock. Processing results already occurring. Skipping...')
@mock.patch('django.db.models.base.Model.save')
def test_fail_release_pr_lock(self, mock_save):
# Create DaskJob
djob = DaskJob(name='test_dj', user=self.user, label='label', scheduler=self.scheduler)
djob.extended_properties['processing_results'] = True
djob._release_pr_lock()
self.assertFalse(djob.extended_properties['processing_results'])
mock_save.assert_called()
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for building input pipelines.
This module contains experimental `Dataset` sources and transformations that can
be used in conjunction with the `tf.data.Dataset` API. Note that the
`tf.data.experimental` API is not subject to the same backwards compatibility
guarantees as `tf.data`, but we will provide deprecation advice in advance of
removing existing functionality.
See [Importing Data](https://tensorflow.org/guide/datasets) for an overview.
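As an illustrative sketch (the range, buffer size, and repeat count below are
arbitrary example values, not defaults), transformations from this module are
applied to a `tf.data.Dataset` through `Dataset.apply`:
```python
import tensorflow as tf
dataset = tf.data.Dataset.range(10)
dataset = dataset.apply(
    tf.data.experimental.shuffle_and_repeat(buffer_size=10, count=2))
```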
@@Counter
@@CheckpointInputPipelineHook
@@CsvDataset
@@DatasetStructure
@@NestedStructure
@@OptimizationOptions
@@Optional
@@OptionalStructure
@@RandomDataset
@@Reducer
@@SparseTensorStructure
@@SqlDataset
@@StatsAggregator
@@StatsOptions
@@Structure
@@TFRecordWriter
@@TensorStructure
@@ThreadingOptions
@@bucket_by_sequence_length
@@bytes_produced_stats
@@cardinality
@@choose_from_datasets
@@copy_to_device
@@dense_to_sparse_batch
@@enumerate_dataset
@@filter_for_shard
@@get_next_as_optional
@@get_single_element
@@group_by_reducer
@@group_by_window
@@ignore_errors
@@latency_stats
@@make_batched_features_dataset
@@make_csv_dataset
@@make_saveable_from_iterator
@@map_and_batch
@@map_and_batch_with_legacy_function
@@parallel_interleave
@@parse_example_dataset
@@prefetch_to_device
@@rejection_resample
@@sample_from_datasets
@@scan
@@shuffle_and_repeat
@@take_while
@@unbatch
@@unique
@@AUTOTUNE
@@INFINITE_CARDINALITY
@@UNKNOWN_CARDINALITY
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.data.experimental.ops.batching import dense_to_sparse_batch
from tensorflow.python.data.experimental.ops.batching import map_and_batch
from tensorflow.python.data.experimental.ops.batching import map_and_batch_with_legacy_function
from tensorflow.python.data.experimental.ops.batching import unbatch
from tensorflow.python.data.experimental.ops.cardinality import cardinality
from tensorflow.python.data.experimental.ops.cardinality import INFINITE as INFINITE_CARDINALITY
from tensorflow.python.data.experimental.ops.cardinality import UNKNOWN as UNKNOWN_CARDINALITY
from tensorflow.python.data.experimental.ops.counter import Counter
from tensorflow.python.data.experimental.ops.enumerate_ops import enumerate_dataset
from tensorflow.python.data.experimental.ops.error_ops import ignore_errors
from tensorflow.python.data.experimental.ops.filter_for_shard_ops import filter_for_shard
from tensorflow.python.data.experimental.ops.get_single_element import get_single_element
from tensorflow.python.data.experimental.ops.grouping import bucket_by_sequence_length
from tensorflow.python.data.experimental.ops.grouping import group_by_reducer
from tensorflow.python.data.experimental.ops.grouping import group_by_window
from tensorflow.python.data.experimental.ops.grouping import Reducer
from tensorflow.python.data.experimental.ops.interleave_ops import choose_from_datasets
from tensorflow.python.data.experimental.ops.interleave_ops import parallel_interleave
from tensorflow.python.data.experimental.ops.interleave_ops import sample_from_datasets
from tensorflow.python.data.experimental.ops.iterator_ops import CheckpointInputPipelineHook
from tensorflow.python.data.experimental.ops.iterator_ops import make_saveable_from_iterator
from tensorflow.python.data.experimental.ops.optimization import AUTOTUNE
from tensorflow.python.data.experimental.ops.optimization_options import OptimizationOptions
from tensorflow.python.data.experimental.ops.parsing_ops import parse_example_dataset
from tensorflow.python.data.experimental.ops.prefetching_ops import copy_to_device
from tensorflow.python.data.experimental.ops.prefetching_ops import prefetch_to_device
from tensorflow.python.data.experimental.ops.random_ops import RandomDataset
from tensorflow.python.data.experimental.ops.readers import CsvDataset
from tensorflow.python.data.experimental.ops.readers import make_batched_features_dataset
from tensorflow.python.data.experimental.ops.readers import make_csv_dataset
from tensorflow.python.data.experimental.ops.readers import SqlDataset
from tensorflow.python.data.experimental.ops.resampling import rejection_resample
from tensorflow.python.data.experimental.ops.scan_ops import scan
from tensorflow.python.data.experimental.ops.shuffle_ops import shuffle_and_repeat
from tensorflow.python.data.experimental.ops.stats_aggregator import StatsAggregator
from tensorflow.python.data.experimental.ops.stats_ops import bytes_produced_stats
from tensorflow.python.data.experimental.ops.stats_ops import latency_stats
from tensorflow.python.data.experimental.ops.stats_options import StatsOptions
from tensorflow.python.data.experimental.ops.take_while_ops import take_while
from tensorflow.python.data.experimental.ops.threading_options import ThreadingOptions
from tensorflow.python.data.experimental.ops.unique import unique
from tensorflow.python.data.experimental.ops.writers import TFRecordWriter
from tensorflow.python.data.ops.dataset_ops import DatasetStructure
from tensorflow.python.data.ops.iterator_ops import get_next_as_optional
from tensorflow.python.data.ops.optional_ops import Optional
from tensorflow.python.data.ops.optional_ops import OptionalStructure
from tensorflow.python.data.util.structure import NestedStructure
from tensorflow.python.data.util.structure import SparseTensorStructure
from tensorflow.python.data.util.structure import Structure
from tensorflow.python.data.util.structure import TensorStructure
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
|
from test.dungeons.TestDungeon import TestDungeon
class TestSkullWoods(TestDungeon):
def testSkullWoodsFrontAllEntrances(self):
self.starting_regions = ['Skull Woods First Section', 'Skull Woods First Section (Left)', 'Skull Woods First Section (Top)']
self.run_tests([
["Skull Woods - Big Chest", False, []],
["Skull Woods - Big Chest", False, [], ['Big Key (Skull Woods)']],
["Skull Woods - Big Chest", True, ['Big Key (Skull Woods)']],
["Skull Woods - Compass Chest", True, []],
["Skull Woods - Map Chest", True, []],
["Skull Woods - Pot Prison", True, []],
["Skull Woods - Pinball Room", True, []]
])
def testSkullWoodsFrontOnly(self):
self.starting_regions = ['Skull Woods First Section']
self.run_tests([
["Skull Woods - Big Chest", False, []],
["Skull Woods - Big Chest", False, [], ['Never in logic']],
["Skull Woods - Compass Chest", False, []],
["Skull Woods - Compass Chest", False, ['Small Key (Skull Woods)'], ['Small Key (Skull Woods)']],
["Skull Woods - Compass Chest", True, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)']],
["Skull Woods - Map Chest", True, []],
["Skull Woods - Pot Prison", False, []],
["Skull Woods - Pot Prison", False, ['Small Key (Skull Woods)'], ['Small Key (Skull Woods)']],
["Skull Woods - Pot Prison", True, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)']],
["Skull Woods - Pinball Room", False, []],
["Skull Woods - Pinball Room", False, [], ['Small Key (Skull Woods)']],
["Skull Woods - Pinball Room", True, ['Small Key (Skull Woods)']]
])
def testSkullWoodsLeftOnly(self):
self.starting_regions = ['Skull Woods First Section (Left)']
self.remove_exits = ['Skull Woods First Section Exit']
self.run_tests([
["Skull Woods - Big Chest", False, []],
["Skull Woods - Big Chest", False, [], ['Never in logic']],
["Skull Woods - Compass Chest", True, []],
["Skull Woods - Map Chest", False, []],
["Skull Woods - Map Chest", False, [], ['Small Key (Skull Woods)']],
["Skull Woods - Map Chest", True, ['Small Key (Skull Woods)']],
["Skull Woods - Pot Prison", True, []],
["Skull Woods - Pinball Room", True, []]
])
def testSkullWoodsBackOnly(self):
self.starting_regions = ['Skull Woods First Section (Top)']
self.remove_exits = ['Skull Woods First Section Exit']
self.run_tests([
["Skull Woods - Big Chest", False, []],
["Skull Woods - Big Chest", False, [], ['Big Key (Skull Woods)']],
["Skull Woods - Big Chest", True, ['Big Key (Skull Woods)']],
["Skull Woods - Compass Chest", False, []],
["Skull Woods - Compass Chest", False, ['Small Key (Skull Woods)'], ['Small Key (Skull Woods)']],
["Skull Woods - Compass Chest", True, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)']],
["Skull Woods - Map Chest", True, []],
["Skull Woods - Pot Prison", False, []],
["Skull Woods - Pot Prison", False, ['Small Key (Skull Woods)'], ['Small Key (Skull Woods)']],
["Skull Woods - Pot Prison", True, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)']],
["Skull Woods - Pinball Room", False, []],
["Skull Woods - Pinball Room", False, [], ['Small Key (Skull Woods)']],
["Skull Woods - Pinball Room", True, ['Small Key (Skull Woods)']]
])
def testSkullWoodsMiddle(self):
self.starting_regions = ['Skull Woods Second Section']
self.remove_exits = ['Skull Woods Second Section Exit (East)', 'Skull Woods Second Section Exit (West)']
self.run_tests([["Skull Woods - Big Key Chest", True, []]])
def testSkullWoodsBack(self):
self.starting_regions = ['Skull Woods Final Section (Entrance)']
self.run_tests([
["Skull Woods - Bridge Room", True, []],
["Skull Woods - Boss", False, []],
["Skull Woods - Boss", False, [], ['Fire Rod']],
["Skull Woods - Boss", False, [], ['Progressive Sword']],
["Skull Woods - Boss", False, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)'], ['Small Key (Skull Woods)']],
["Skull Woods - Boss", True, ['Small Key (Skull Woods)', 'Small Key (Skull Woods)', 'Small Key (Skull Woods)', 'Fire Rod', 'Progressive Sword']],
])
|
from . import db
# connect class user to
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.enums',
marshal='google.ads.googleads.v8',
manifest={
'SharedSetStatusEnum',
},
)
class SharedSetStatusEnum(proto.Message):
r"""Container for enum describing types of shared set statuses.
"""
class SharedSetStatus(proto.Enum):
r"""Enum listing the possible shared set statuses."""
UNSPECIFIED = 0
UNKNOWN = 1
ENABLED = 2
REMOVED = 3
__all__ = tuple(sorted(__protobuf__.manifest))
|
import asyncio
from asyncio import Task
from typing import Any, Callable
import appdaemon.plugins.hass.hassapi as hass
import appdaemon.plugins.mqtt.mqttapi as mqtt
import pytest
from cx_core import Controller
from pytest import MonkeyPatch
from tests.test_utils import fake_fn
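# Stand-in for AppDaemon's `run_in`: schedules an asyncio task that sleeps for
# `delay` seconds and then awaits `fn` with the collected kwargs.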
async def fake_run_in(
self: Controller, fn: Callable[..., Any], delay: float, **kwargs: Any
) -> "Task[None]":
async def inner() -> None:
await asyncio.sleep(delay)
await fn(kwargs)
task = asyncio.create_task(inner())
return task
async def fake_cancel_timer(self: Controller, task: "Task[None]") -> bool:
return task.cancel()
@pytest.fixture(autouse=True)
def hass_mock(monkeypatch: MonkeyPatch) -> None:
"""
Fixture to set up the tests by mocking AppDaemon functions
"""
monkeypatch.setattr(hass.Hass, "__init__", fake_fn())
monkeypatch.setattr(hass.Hass, "listen_event", fake_fn(async_=True))
monkeypatch.setattr(mqtt.Mqtt, "listen_event", fake_fn(async_=True))
monkeypatch.setattr(hass.Hass, "listen_state", fake_fn(async_=True))
monkeypatch.setattr(hass.Hass, "log", fake_fn())
monkeypatch.setattr(hass.Hass, "call_service", fake_fn(async_=True))
monkeypatch.setattr(hass.Hass, "get_ad_version", fake_fn(to_return="4.0.0"))
monkeypatch.setattr(hass.Hass, "run_in", fake_run_in)
monkeypatch.setattr(hass.Hass, "cancel_timer", fake_cancel_timer)
|
import argparse
from pathlib import Path
from nndet.io import load_pickle
from nndet.core.boxes.ops_np import box_center_np
THRESHOLD = 0.5
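# Read the predicted boxes for a case, keep those scoring above THRESHOLD, and
# append their center coordinates (axes written in reverse order) to result.txt.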
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('source', type=Path)
args = parser.parse_args()
source = args.source
predictions = load_pickle(source / "case_boxes.pkl")
boxes = predictions["pred_boxes"]
scores = predictions["pred_scores"]
keep = scores > THRESHOLD
boxes = boxes[keep]
if boxes.size > 0:
centers = box_center_np(boxes)
else:
centers = []
with open(source / "result.txt", "a") as f:
if len(centers) > 0:
for c in centers[:-1]:
f.write(f"{round(float(c[2]))}, {round(float(c[1]))}, {round(float(c[0]))}\n")
c = centers[-1]
f.write(f"{round(float(c[2]))}, {round(float(c[1]))}, {round(float(c[0]))}")
|
import torch
import numpy as np
import argparse
import pandas as pd
import sys
import os
from torch import nn
from torch.nn import functional as F
import tqdm
import pprint
from src import utils as ut
import torchvision
from haven import haven_utils as hu
from haven import haven_chk as hc
from src import datasets, models
from torch.utils.data import DataLoader
import exp_configs
from torch.utils.data.sampler import RandomSampler
from src import wrappers
def trainval(exp_dict, savedir_base, reset, metrics_flag=True, datadir=None, cuda=False):
# bookkeeping
# ---------------
# get experiment directory
exp_id = hu.hash_dict(exp_dict)
savedir = os.path.join(savedir_base, exp_id)
if reset:
# delete and backup experiment
hc.delete_experiment(savedir, backup_flag=True)
# create folder and save the experiment dictionary
os.makedirs(savedir, exist_ok=True)
hu.save_json(os.path.join(savedir, 'exp_dict.json'), exp_dict)
pprint.pprint(exp_dict)
print('Experiment saved in %s' % savedir)
# set seed
# ==================
seed = 42
np.random.seed(seed)
torch.manual_seed(seed)
if cuda:
device = 'cuda'
torch.cuda.manual_seed_all(seed)
assert torch.cuda.is_available(), 'cuda is not available, please run with "-c 0"'
else:
device = 'cpu'
print('Running on device: %s' % device)
# Dataset
# Load val set and train set
val_set = datasets.get_dataset(dataset_name=exp_dict["dataset"], split="val",
transform=exp_dict.get("transform"),
datadir=datadir)
train_set = datasets.get_dataset(dataset_name=exp_dict["dataset"],
split="train",
transform=exp_dict.get("transform"),
datadir=datadir)
# Load train loader, val loader, and vis loader
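# The train sampler draws with replacement: min(500, len(train_set)) samples
# per epoch, but never fewer than len(val_set).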
train_loader = DataLoader(train_set,
sampler=RandomSampler(train_set,
replacement=True, num_samples=max(min(500,
len(train_set)),
len(val_set))),
batch_size=exp_dict["batch_size"])
val_loader = DataLoader(val_set, shuffle=False, batch_size=exp_dict["batch_size"])
vis_loader = DataLoader(val_set, sampler=ut.SubsetSampler(train_set,
indices=[0, 1, 2]),
batch_size=1)
# Create model, opt, wrapper
model_original = models.get_model(exp_dict["model"], exp_dict=exp_dict).to(device)
opt = torch.optim.Adam(model_original.parameters(),
lr=1e-5, weight_decay=0.0005)
model = wrappers.get_wrapper(exp_dict["wrapper"], model=model_original, opt=opt).to(device)
score_list = []
# Checkpointing
# =============
score_list_path = os.path.join(savedir, "score_list.pkl")
model_path = os.path.join(savedir, "model_state_dict.pth")
opt_path = os.path.join(savedir, "opt_state_dict.pth")
if os.path.exists(score_list_path):
# resume experiment
score_list = ut.load_pkl(score_list_path)
model.load_state_dict(torch.load(model_path))
opt.load_state_dict(torch.load(opt_path))
s_epoch = score_list[-1]["epoch"] + 1
else:
# restart experiment
score_list = []
s_epoch = 0
# Run training and validation
for epoch in range(s_epoch, exp_dict["max_epoch"]):
score_dict = {"epoch": epoch}
# visualize
# model.vis_on_loader(vis_loader, savedir=os.path.join(savedir, "images"))
# validate
score_dict.update(model.val_on_loader(val_loader))
# train
score_dict.update(model.train_on_loader(train_loader))
# Add score_dict to score_list
score_list += [score_dict]
# Report and save
print(pd.DataFrame(score_list).tail())
hu.save_pkl(score_list_path, score_list)
hu.torch_save(model_path, model.state_dict())
hu.torch_save(opt_path, opt.state_dict())
print("Saved in %s" % savedir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--exp_group_list', nargs='+')
parser.add_argument('-sb', '--savedir_base', required=True)
parser.add_argument('-d', '--datadir', required=True)
parser.add_argument('-r', '--reset', default=0, type=int)
parser.add_argument('-ei', '--exp_id', default=None)
parser.add_argument('-c', '--cuda', type=int, default=1)
args = parser.parse_args()
# Collect experiments
# -------------------
if args.exp_id is not None:
# select one experiment
savedir = os.path.join(args.savedir_base, args.exp_id)
exp_dict = hu.load_json(os.path.join(savedir, 'exp_dict.json'))
exp_list = [exp_dict]
else:
# select exp group
exp_list = []
for exp_group_name in args.exp_group_list:
exp_list += exp_configs.EXP_GROUPS[exp_group_name]
####
# Run experiments or View them
# ----------------------------
# run experiments
for exp_dict in exp_list:
# do trainval
trainval(exp_dict=exp_dict,
savedir_base=args.savedir_base,
reset=args.reset,
datadir=args.datadir,
cuda=args.cuda)
|
#!/usr/bin/python
'''
Generate a command file for automated kSim experiment with out-of-dataset queries
Example:
./gen_ksim_outside_exp.py 10 10 dataset.txt 0.3 experiment.txt results.txt -k 1 3 5 7 -m 7 --seed 23 -n 10
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import random
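# Pick `n` random (series index, start, end) windows, each at least
# `min_length` points long and bounded by the dataset dimensions.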
def pick_random(count, length, min_length=2, n=10):
picked = []
for i in range(n):
idx = random.randint(0, count - 1)
start = random.randint(0, length - min_length)
end = start + random.randint(min_length, length - start)
picked.append((idx, start, end))
return picked
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Picks random time series given the '
'dimension of a dataset.')
parser.add_argument('count', type=int, help='number of items in the query set.')
parser.add_argument('length', type=int, help='length of each item in the dataset.')
parser.add_argument('ds_path', help='path to the dataset used in the experiment')
parser.add_argument('q_path', help='path to the query file used in the experiment')
parser.add_argument('st', type=float, help='similarity threshold for the experiment')
parser.add_argument('exp_path', help='path to the K-ONEX experiment script')
parser.add_argument('exp_result_path', help='path for the result file')
parser.add_argument('-k', nargs='+',
help='number of similar time series to look for. '
'Multiple values can be specified (separated by space).')
parser.add_argument('-m', help='maximum number of multiple of h '
'(number of time series to be examined).')
parser.add_argument('-n', type=int, default=10,
help='number of sequences to be picked (default: 10).')
parser.add_argument('--paa', type=int, nargs='+', default=[1],
help='block sizes for PAA (default: 1).')
parser.add_argument('--seed', type=int,
help='seed for the random number generator.')
parser.add_argument('--min-length', type=int, default=10,
help='minimum length of each sequence (default: 10).')
parser.add_argument('--fmt', default='{0} [{1}, {2}]',
help='python format for output (default: {0} [{1}, {2}])')
args = parser.parse_args()
random.seed(args.seed)
seq = pick_random(args.count, args.length, args.min_length, args.n)
for s in seq:
print(args.fmt.format(s[0], s[1], s[2]))
print()
with open(args.exp_path, 'w') as f:
print('load {}'.format(args.ds_path), file=f)
print('load {}'.format(args.q_path), file=f)
group_file = '%s_GROUPS_%.1f' % (args.ds_path, args.st)
if os.path.exists(group_file):
print('loadGroup 0 {}'.format(group_file), file=f)
else:
print('group 0 {}'.format(args.st), file=f)
print('saveGroup 0 {}'.format(group_file), file=f)
print('testSim {}'.format(args.exp_result_path), file=f)
for b in args.paa:
for k in args.k:
for s in seq:
print('testSim {} {} {} 0 1 {} {} {}'.format(k, args.m, b, s[0], s[1], s[2]),
file=f)
print('Experiment script is generated at {}'.format(args.exp_path))
|
#!/usr/bin/env python
"""PMFP.
A project-management scaffolding tool.
"""
import sys
from typing import List
from pmfp.entrypoint import ppm
from colorama import init
init()
def main(argv: List[str] = sys.argv[1:]) -> None:
"""服务启动入口.
设置覆盖顺序`环境变量>命令行参数`>`'-c'指定的配置文件`>`项目启动位置的配置文件`>默认配置.
"""
ppm(argv)
return None
if __name__ == "__main__":
main(sys.argv[1:])
|
import os
import re
import json
import itertools
from typing import List, Union, Any
import pytest
from client.client import Client
from tools import utils
from tools.constants import IDENTITIES
from .contract_paths import (
CONTRACT_PATH,
ILLTYPED_CONTRACT_PATH,
all_contracts,
all_legacy_contracts,
)
def file_basename(path):
return os.path.splitext(os.path.basename(path))[0]
# Generic piece of code to originate a contract
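# It originates `contract` with the given initial storage and amount, bakes a
# block with `baker`, and asserts that the origination operation was included
# in that block.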
def originate(
client,
session,
contract,
init_storage,
amount,
contract_name=None,
sender='bootstrap1',
baker='bootstrap5',
arguments=None,
):
if contract_name is None:
contract_name = file_basename(contract)
args = ['--init', init_storage, '--burn-cap', '10.0']
if arguments is not None:
args += arguments
origination = client.originate(
contract_name, amount, sender, contract, args
)
session['contract'] = origination.contract
print(origination.contract)
utils.bake(client, baker)
assert utils.check_block_contains_operations(
client, [origination.operation_hash]
)
return origination
@pytest.mark.contract
@pytest.mark.incremental
class TestManager:
def test_manager_origination(self, client: Client, session: dict):
path = os.path.join(CONTRACT_PATH, 'entrypoints', 'manager.tz')
pubkey = IDENTITIES['bootstrap2']['identity']
originate(client, session, path, f'"{pubkey}"', 1000)
originate(
client, session, path, f'"{pubkey}"', 1000, contract_name="manager2"
)
def test_delegatable_origination(self, client: Client, session: dict):
path = os.path.join(
CONTRACT_PATH, 'entrypoints', 'delegatable_target.tz'
)
pubkey = IDENTITIES['bootstrap2']['identity']
originate(
client, session, path, f'Pair "{pubkey}" (Pair "hello" 45)', 1000
)
def test_target_with_entrypoints_origination(self, client: Client, session):
path = os.path.join(
CONTRACT_PATH, 'entrypoints', 'big_map_entrypoints.tz'
)
originate(
client, session, path, 'Pair {} {}', 1000, contract_name='target'
)
def test_target_without_entrypoints_origination(
self, client: Client, session
):
path = os.path.join(
CONTRACT_PATH, 'entrypoints', 'no_entrypoint_target.tz'
)
originate(
client,
session,
path,
'Pair "hello" 42',
1000,
contract_name='target_no_entrypoints',
)
def test_target_without_default_origination(self, client: Client, session):
path = os.path.join(
CONTRACT_PATH, 'entrypoints', 'no_default_target.tz'
)
originate(
client,
session,
path,
'Pair "hello" 42',
1000,
contract_name='target_no_default',
)
def test_target_with_root_origination(self, client: Client, session):
path = os.path.join(CONTRACT_PATH, 'entrypoints', 'rooted_target.tz')
originate(
client,
session,
path,
'Pair "hello" 42',
1000,
contract_name='rooted_target',
)
def test_manager_set_delegate(self, client: Client):
client.set_delegate('manager', 'bootstrap2', [])
utils.bake(client, 'bootstrap5')
bootstrap2_pkh = IDENTITIES['bootstrap2']['identity']
client.set_delegate('delegatable_target', bootstrap2_pkh, [])
utils.bake(client, 'bootstrap5')
delegate = IDENTITIES['bootstrap2']['identity']
assert client.get_delegate('manager', []).delegate == delegate
assert (
client.get_delegate('delegatable_target', []).delegate == delegate
)
client.set_delegate('manager', 'bootstrap3', [])
utils.bake(client, 'bootstrap5')
client.set_delegate('delegatable_target', 'bootstrap3', [])
utils.bake(client, 'bootstrap5')
delegate = IDENTITIES['bootstrap3']['identity']
assert client.get_delegate('manager', []).delegate == delegate
assert (
client.get_delegate('delegatable_target', []).delegate == delegate
)
def test_manager_withdraw_delegate(self, client: Client):
client.withdraw_delegate('manager', [])
utils.bake(client, 'bootstrap5')
client.withdraw_delegate('delegatable_target', [])
utils.bake(client, 'bootstrap5')
assert client.get_delegate('manager', []).delegate is None
assert client.get_delegate('delegatable_target', []).delegate is None
def test_transfer_to_manager(self, client: Client):
balance = client.get_mutez_balance('manager')
balance_bootstrap = client.get_mutez_balance('bootstrap2')
amount = 10.001
amount_mutez = utils.mutez_of_tez(amount)
client.transfer(
amount,
'bootstrap2',
'manager',
['--gas-limit', f'{128 * 15450 + 108}'],
)
utils.bake(client, 'bootstrap5')
new_balance = client.get_mutez_balance('manager')
new_balance_bootstrap = client.get_mutez_balance('bootstrap2')
fee = 0.000548
fee_mutez = utils.mutez_of_tez(fee)
assert balance + amount_mutez == new_balance
assert (
balance_bootstrap - fee_mutez - amount_mutez
== new_balance_bootstrap
)
def test_simple_transfer_from_manager_to_implicit(self, client: Client):
balance = client.get_mutez_balance('manager')
balance_bootstrap = client.get_mutez_balance('bootstrap2')
amount = 10.1
amount_mutez = utils.mutez_of_tez(amount)
client.transfer(
amount,
'manager',
'bootstrap2',
['--gas-limit', f'{128 * 26350 + 12}'],
)
utils.bake(client, 'bootstrap5')
new_balance = client.get_mutez_balance('manager')
new_balance_bootstrap = client.get_mutez_balance('bootstrap2')
fee = 0.000794
fee_mutez = utils.mutez_of_tez(fee)
assert balance - amount_mutez == new_balance
assert (
balance_bootstrap + amount_mutez - fee_mutez
== new_balance_bootstrap
)
def test_transfer_from_manager_to_manager(self, client: Client):
balance = client.get_mutez_balance('manager')
balance_dest = client.get_mutez_balance('manager2')
balance_bootstrap = client.get_mutez_balance('bootstrap2')
amount = 10
amount_mutez = utils.mutez_of_tez(amount)
client.transfer(
amount,
'manager',
'manager2',
['--gas-limit', f'{128 * 44950 + 112}'],
)
utils.bake(client, 'bootstrap5')
new_balance = client.get_mutez_balance('manager')
new_balance_dest = client.get_mutez_balance('manager2')
new_balance_bootstrap = client.get_mutez_balance('bootstrap2')
fee = 0.001124
fee_mutez = utils.mutez_of_tez(fee)
assert balance_bootstrap - fee_mutez == new_balance_bootstrap
assert balance - amount_mutez == new_balance
assert balance_dest + amount_mutez == new_balance_dest
def test_transfer_from_manager_to_default(self, client: Client):
client.transfer(
10, 'manager', 'bootstrap2', ['--entrypoint', 'default']
)
utils.bake(client, 'bootstrap5')
client.transfer(10, 'manager', 'manager', ['--entrypoint', 'default'])
utils.bake(client, 'bootstrap5')
def test_transfer_from_manager_to_target(self, client: Client):
client.transfer(10, 'manager', 'target', ['--burn-cap', '0.356'])
utils.bake(client, 'bootstrap5')
def test_transfer_from_manager_to_entrypoint_with_args(
self, client: Client
):
arg = 'Pair "hello" 42'
# using 'transfer'
client.transfer(
0,
'manager',
'target',
['--entrypoint', 'add_left', '--arg', arg, '--burn-cap', '0.067'],
)
utils.bake(client, 'bootstrap5')
client.transfer(
0,
'manager',
'target',
['--entrypoint', 'mem_left', '--arg', '"hello"'],
)
utils.bake(client, 'bootstrap5')
# using 'call'
client.call(
'manager',
'target',
['--entrypoint', 'add_left', '--arg', arg, '--burn-cap', '0.067'],
)
utils.bake(client, 'bootstrap5')
client.call(
'manager',
'target',
['--entrypoint', 'mem_left', '--arg', '"hello"'],
)
utils.bake(client, 'bootstrap5')
def test_transfer_from_manager_no_entrypoint_with_args(
self, client: Client
):
arg = 'Left Unit'
client.transfer(0, 'manager', 'target_no_entrypoints', ['--arg', arg])
utils.bake(client, 'bootstrap5')
client.call('manager', 'target_no_entrypoints', ['--arg', arg])
utils.bake(client, 'bootstrap5')
def test_transfer_from_manager_to_no_default_with_args(
self, client: Client
):
arg = 'Left Unit'
client.transfer(0, 'manager', 'target_no_default', ['--arg', arg])
utils.bake(client, 'bootstrap5')
client.call('manager', 'target_no_default', ['--arg', arg])
utils.bake(client, 'bootstrap5')
def test_transfer_from_manager_to_rooted_target_with_args(
self, client: Client
):
arg = 'Left Unit'
client.transfer(
0,
'manager',
'rooted_target',
['--arg', arg, '--entrypoint', 'root'],
)
utils.bake(client, 'bootstrap5')
client.call(
'manager', 'rooted_target', ['--arg', arg, '--entrypoint', 'root']
)
utils.bake(client, 'bootstrap5')
def test_transfer_json_to_entrypoint_with_args(self, client):
balance = client.get_mutez_balance('manager')
balance_bootstrap = client.get_mutez_balance('bootstrap2')
fee = 0.0123
fee_mutez = utils.mutez_of_tez(fee)
json_obj = [
{
"destination": "target",
"amount": "0",
"fee": str(fee),
"gas-limit": "65942",
"storage-limit": "1024",
"arg": 'Pair "hello" 42',
"entrypoint": "add_left",
}
]
json_ops = json.dumps(json_obj, separators=(',', ':'))
client.run(client.cmd_batch('manager', json_ops))
utils.bake(client, 'bootstrap5')
new_balance = client.get_mutez_balance('manager')
new_balance_bootstrap = client.get_mutez_balance('bootstrap2')
assert balance == new_balance
assert balance_bootstrap - fee_mutez == new_balance_bootstrap
def test_multiple_transfers(self, client):
balance = client.get_mutez_balance('manager')
balance_bootstrap2 = client.get_mutez_balance('bootstrap2')
balance_bootstrap3 = client.get_mutez_balance('bootstrap3')
amount_2 = 10.1
amount_mutez_2 = utils.mutez_of_tez(amount_2)
amount_3 = 11.01
amount_mutez_3 = utils.mutez_of_tez(amount_3)
json_obj = [
{"destination": "bootstrap2", "amount": str(amount_2)},
{"destination": "bootstrap3", "amount": str(amount_3)},
]
json_ops = json.dumps(json_obj, separators=(',', ':'))
client.run(client.cmd_batch('manager', json_ops))
utils.bake(client, 'bootstrap5')
new_balance = client.get_mutez_balance('manager')
new_balance_bootstrap2 = client.get_mutez_balance('bootstrap2')
new_balance_bootstrap3 = client.get_mutez_balance('bootstrap3')
fee_mutez = 794 + 698
assert balance - amount_mutez_2 - amount_mutez_3 == new_balance
assert (
balance_bootstrap2 + amount_mutez_2 - fee_mutez
== new_balance_bootstrap2
)
assert balance_bootstrap3 + amount_mutez_3 == new_balance_bootstrap3
# This test verifies contract execution order. There are 3
# contracts: Storer, Caller, and Appender. Storer appends its argument
# to its storage. Caller calls the list of unit contracts in its
# storage. Appender calls the string contract in its storage with a
# stored argument.
#
# For each test, there is one unique Storer. Each test is
# parameterized by a tree and the expected final storage of the
# Storer. A leaf in the tree is a string. Inner nodes are lists of
# leaves/inner nodes. The test maps over this tree to build a
# tree of contracts. Leaf nodes map to Appender contracts that call
# the Storer. Inner nodes map to Caller contracts that call their
# children.
#
# Example. Given the tree: ["A", ["B"], "C"], we obtain
# Caller([Appender("A"), Caller([Appender("B")]), Appender("C")])
# Before protocol 009, contract execution order was BFS;
# in BFS, Storer would have ended up with storage ACB.
# In DFS, Storer will end up with storage ABC.
@pytest.mark.contract
@pytest.mark.incremental
class TestExecutionOrdering:
STORER = f'{CONTRACT_PATH}/mini_scenarios/execution_order_storer.tz'
CALLER = f'{CONTRACT_PATH}/mini_scenarios/execution_order_caller.tz'
APPENDER = f'{CONTRACT_PATH}/mini_scenarios/execution_order_appender.tz'
def originate_storer(self, client: Client, session: dict):
origination = originate(
client, session, self.STORER, '""', 0, arguments=['--force']
)
session['storer'] = origination.contract
utils.bake(client, 'bootstrap3')
return origination.contract
def originate_appender(
self, client: Client, session: dict, storer: str, argument: str
):
origination = originate(
client,
session,
self.APPENDER,
f'Pair "{storer}" "{argument}"',
0,
contract_name=f'appender-{argument}',
arguments=['--force'],
)
session[f'appender.{argument}'] = origination.contract
utils.bake(client, 'bootstrap3')
return origination.contract
def originate_caller(
self, client: Client, session: dict, callees: List[str]
):
storage = "{" + '; '.join(map('"{}"'.format, callees)) + "}"
origination = originate(
client,
session,
self.CALLER,
storage,
0,
contract_name=f'caller-{hash(storage)}',
)
utils.bake(client, 'bootstrap3')
return origination.contract
@pytest.mark.parametrize(
"tree, expected",
[
# before 009, the result should be "DABCEFG".
([["A", "B", "C"], "D", ["E", "F", "G"]], "ABCDEFG"),
# before 009, the result should be "ACB".
([["A", ["B"], "C"]], "ABC"),
# before 009, the result should be "ABDC".
([["A", ["B", ["C"], "D"]]], "ABCD"),
([], ""),
],
)
def test_ordering(
self,
client: Client,
session: dict,
# approximation of recursive type annotation
tree: Union[str, List[Any]],
expected: str,
):
storer = self.originate_storer(client, session)
def deploy_tree(tree: Union[str, List[Any]]) -> str:
# leaf
if isinstance(tree, str):
# deploy and return caller str
return self.originate_appender(client, session, storer, tree)
# inner node
children = list(map(deploy_tree, tree))
return self.originate_caller(client, session, children)
root = deploy_tree(tree)
client.transfer(
0,
'bootstrap2',
root,
["--burn-cap", "5"],
)
utils.bake(client, 'bootstrap3')
assert client.get_storage(storer) == '"{}"'.format(expected)
@pytest.mark.slow
@pytest.mark.contract
class TestContracts:
"""Test type checking and execution of a bunch of contracts"""
@pytest.mark.parametrize("contract", all_contracts())
def test_typecheck(self, client: Client, contract):
assert contract.endswith(
'.tz'
), "test contract should have .tz extension"
client.typecheck(os.path.join(CONTRACT_PATH, contract))
@pytest.mark.parametrize("contract", all_legacy_contracts())
def test_deprecated_typecheck_breaks(self, client, contract):
if contract in [
"legacy/create_contract.tz",
"legacy/create_contract_flags.tz",
"legacy/create_contract_rootname.tz",
]:
with utils.assert_run_failure(r'ill-typed script'):
client.typecheck(os.path.join(CONTRACT_PATH, contract))
else:
with utils.assert_run_failure(r'Use of deprecated instruction'):
client.typecheck(os.path.join(CONTRACT_PATH, contract))
@pytest.mark.parametrize("contract", all_legacy_contracts())
def test_deprecated_typecheck_in_legacy(self, client, contract):
if contract in [
"legacy/create_contract.tz",
"legacy/create_contract_flags.tz",
"legacy/create_contract_rootname.tz",
]:
with utils.assert_run_failure(r'ill-typed script'):
client.typecheck(
os.path.join(CONTRACT_PATH, contract), legacy=True
)
else:
with utils.assert_run_failure(r'Use of deprecated instruction'):
client.typecheck(
os.path.join(CONTRACT_PATH, contract), legacy=True
)
@pytest.mark.parametrize(
"contract,error_pattern",
[
# operations cannot be PACKed
(
"pack_operation.tz",
r'operation type forbidden in parameter, storage and constants',
),
# big_maps cannot be PACKed
(
"pack_big_map.tz",
r'big_map or sapling_state type not expected here',
),
(
"invalid_self_entrypoint.tz",
r'Contract has no entrypoint named D',
),
("contract_annotation_default.tz", r'unexpected annotation'),
# Missing field
(
"missing_only_storage_field.tz",
r'Missing contract field: storage',
),
("missing_only_code_field.tz", r'Missing contract field: code'),
(
"missing_only_parameter_field.tz",
r'Missing contract field: parameter',
),
(
"missing_parameter_and_storage_fields.tz",
r'Missing contract field: parameter',
),
# Duplicated field
(
"multiple_parameter_field.tz",
r'duplicate contract field: parameter',
),
("multiple_code_field.tz", r'duplicate contract field: code'),
("multiple_storage_field.tz", r'duplicate contract field: storage'),
# The first duplicated field is reported, storage in this case
(
"multiple_storage_and_code_fields.tz",
r'duplicate contract field: storage',
),
# error message for set update on non-comparable type
(
"set_update_non_comparable.tz",
r'Type nat is not compatible with type list operation',
),
# error message for the arity of the chain_id type
(
"chain_id_arity.tz",
r'primitive chain_id expects 0 arguments but is given 1',
),
# error message for DIP over the limit
("big_dip.tz", r'expected a positive 10-bit integer'),
# error message for DROP over the limit
("big_drop.tz", r'expected a positive 10-bit integer'),
# error message for set update on non-comparable type
(
"set_update_non_comparable.tz",
r'Type nat is not compatible with type list operation',
),
# error message for attempting to push a value of type never
("never_literal.tz", r'type never has no inhabitant.'),
# field annotation mismatch with UNPAIR
(
"unpair_field_annotation_mismatch.tz",
r'The field access annotation does not match',
),
# COMB, UNCOMB, and DUP cannot take 0 as argument
("comb0.tz", r"PAIR expects an argument of at least 2"),
("comb1.tz", r"PAIR expects an argument of at least 2"),
("uncomb0.tz", r"UNPAIR expects an argument of at least 2"),
("uncomb1.tz", r"UNPAIR expects an argument of at least 2"),
("dup0.tz", r"DUP n expects an argument of at least 1"),
(
"push_big_map_with_id_with_parens.tz",
r"big_map or sapling_state type not expected here",
),
(
"push_big_map_with_id_without_parens.tz",
r"primitive PUSH expects 2 arguments but is given 4",
),
# sapling_state is not packable
(
"pack_sapling_state.tz",
r"big_map or sapling_state type not expected here",
),
# sapling_state is not packable
(
"unpack_sapling_state.tz",
r"big_map or sapling_state type not expected here",
),
# Ticket duplication attempt
("ticket_dup.tz", r'DUP used on the non-dupable type ticket nat'),
# error message for ticket unpack
("ticket_unpack.tz", r'Ticket in unauthorized position'),
# error message for attempting to use APPLY to capture a ticket
("ticket_apply.tz", r'Ticket in unauthorized position'),
# error message for attempting to wrap a ticket in a ticket
(
"ticket_in_ticket.tz",
r'comparable type expected.Type ticket unit is not comparable',
),
],
)
def test_ill_typecheck(self, client: Client, contract, error_pattern):
with utils.assert_run_failure(error_pattern):
client.typecheck(os.path.join(ILLTYPED_CONTRACT_PATH, contract))
def test_zero_transfer_to_implicit_contract(self, client):
pubkey = IDENTITIES['bootstrap3']['identity']
err = (
'Transaction of 0ꜩ towards a contract without code are '
rf'forbidden \({pubkey}\).'
)
with utils.assert_run_failure(err):
client.transfer(0, 'bootstrap2', 'bootstrap3', [])
def test_zero_transfer_to_nonexistent_contract(self, client):
nonexistent = "KT1Fcq4inD44aMhmUiTEHR1QMQwJT7p2u641"
err = rf'Contract {nonexistent} does not exist'
with utils.assert_run_failure(err):
client.transfer(0, 'bootstrap2', nonexistent, [])
FIRST_EXPLOSION = '''
{ parameter unit;
storage unit;
code{ DROP; PUSH nat 0 ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DROP ; UNIT ; NIL operation ; PAIR} }
'''
# FIRST_EXPLOSION costs a large amount of gas just for typechecking.
# FIRST_EXPLOSION_BIGTYPE type size exceeds the protocol set bound.
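# Each `DUP ; PAIR` pairs the value with a copy of itself, roughly doubling the
# size of the resulting type: the extra repetitions in FIRST_EXPLOSION_BIGTYPE
# push it past the protocol's maximum type size (1000), while FIRST_EXPLOSION
# stays within the bound but is already expensive to typecheck.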
FIRST_EXPLOSION_BIGTYPE = '''
{ parameter unit;
storage unit;
code{ DROP; PUSH nat 0 ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DUP ; PAIR ;
DROP ; UNIT ; NIL operation ; PAIR} }
'''
SECOND_EXPLOSION = '''
{ parameter (list int) ;
storage (list (list (list int))) ;
code { CAR ; DIP { NIL (list int) } ;
DUP ; ITER { DROP ; DUP ; DIP { CONS } } ;
DROP ; DIP { NIL (list (list int)) } ;
DUP ; ITER { DROP ; DUP ; DIP { CONS } } ;
DROP ; NIL operation ; PAIR } }
'''
@pytest.mark.contract
class TestGasBound:
def test_write_contract(self, tmpdir, session: dict):
items = {
'first_explosion.tz': FIRST_EXPLOSION,
'first_explosion_bigtype.tz': FIRST_EXPLOSION_BIGTYPE,
'second_explosion.tz': SECOND_EXPLOSION,
}.items()
for name, script in items:
contract = f'{tmpdir}/{name}'
with open(contract, 'w') as contract_file:
contract_file.write(script)
session[name] = contract
def test_originate_first_explosion(self, client: Client, session: dict):
name = 'first_explosion.tz'
contract = session[name]
client.typecheck(contract)
args = ['-G', f'{1870}', '--burn-cap', '10']
expected_error = "Gas limit exceeded during typechecking or execution"
with utils.assert_run_failure(expected_error):
client.originate(f'{name}', 0, 'bootstrap1', contract, args)
def test_originate_big_type(self, client: Client, session: dict):
name = 'first_explosion_bigtype.tz'
contract = session[name]
# Parentheses in the expected error are matched with regex dots
# to avoid having to escape them.
expected_error = "type size .1023. exceeded maximum type size .1000."
with utils.assert_run_failure(expected_error):
client.typecheck(contract)
def test_originate_second_explosion(self, client: Client, session: dict):
name = 'second_explosion.tz'
contract = session[name]
storage = '{}'
inp = '{1;2;3;4;5;6;7;8;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1;1}'
client.run_script(contract, storage, inp)
def test_originate_second_explosion_fail(
self, client: Client, session: dict
):
name = 'second_explosion.tz'
contract = session[name]
storage = '{}'
inp = (
'{1;2;3;4;5;6;7;8;9;0;1;2;3;4;5;6;7;1;1;1;1;1;1;1;1;1;1;1'
+ ';1;1;1;1;1;1;1;1;1;1;1;1;1;1}'
)
expected_error = (
"Cannot serialize the resulting storage"
+ " value within the provided gas bounds."
)
with utils.assert_run_failure(expected_error):
client.run_script(contract, storage, inp, gas=9290)
def test_typecheck_map_dup_key(self, client: Client):
expected_error = (
'Map literals cannot contain duplicate'
+ ' keys, however a duplicate key was found'
)
with utils.assert_run_failure(expected_error):
client.typecheck_data('{ Elt 0 1 ; Elt 0 1}', '(map nat nat)')
def test_typecheck_map_bad_ordering(self, client: Client):
expected_error = (
"Keys in a map literal must be in strictly"
+ " ascending order, but they were unordered in literal"
)
with utils.assert_run_failure(expected_error):
client.typecheck_data(
'{ Elt 0 1 ; Elt 10 1 ; Elt 5 1 }', '(map nat nat)'
)
def test_typecheck_set_bad_ordering(self, client: Client):
expected_error = (
"Values in a set literal must be in strictly"
+ " ascending order, but they were unordered in literal"
)
with utils.assert_run_failure(expected_error):
client.typecheck_data('{ "A" ; "C" ; "B" }', '(set string)')
def test_typecheck_set_no_duplicates(self, client: Client):
expected_error = (
"Set literals cannot contain duplicate values,"
+ " however a duplicate value was found"
)
with utils.assert_run_failure(expected_error):
client.typecheck_data('{ "A" ; "B" ; "B" }', '(set string)')
@pytest.mark.contract
class TestChainId:
def test_chain_id_opcode(self, client: Client, session: dict):
path = os.path.join(CONTRACT_PATH, 'opcodes', 'chain_id.tz')
originate(client, session, path, 'Unit', 0)
client.call('bootstrap2', "chain_id", [])
utils.bake(client, 'bootstrap5')
def test_chain_id_authentication_origination(self, client: Client, session):
path = os.path.join(
CONTRACT_PATH, 'mini_scenarios', 'authentication.tz'
)
pubkey = IDENTITIES['bootstrap1']['public']
originate(client, session, path, f'Pair 0 "{pubkey}"', 1000)
utils.bake(client, 'bootstrap5')
def test_chain_id_authentication_first_run(
self, client: Client, session: dict
):
destination = IDENTITIES['bootstrap2']['identity']
operation = (
'{DROP; NIL operation; '
+ f'PUSH address "{destination}"; '
+ 'CONTRACT unit; ASSERT_SOME; PUSH mutez 1000; UNIT; '
+ 'TRANSFER_TOKENS; CONS}'
)
chain_id = client.rpc('get', 'chains/main/chain_id')
contract_address = session['contract']
packed = client.pack(
f'Pair (Pair "{chain_id}" "{contract_address}") '
+ f'(Pair {operation} 0)',
'pair (pair chain_id address)'
+ '(pair (lambda unit (list operation)) nat)',
)
signature = client.sign_bytes_of_string(packed, "bootstrap1")
client.call(
'bootstrap2',
'authentication',
['--arg', f'Pair {operation} \"{signature}\"'],
)
utils.bake(client, 'bootstrap5')
@pytest.mark.contract
class TestBigMapToSelf:
def test_big_map_to_self_origination(self, client: Client, session: dict):
path = os.path.join(CONTRACT_PATH, 'opcodes', 'big_map_to_self.tz')
originate(client, session, path, '{}', 0)
utils.bake(client, 'bootstrap5')
def test_big_map_to_self_transfer(self, client: Client):
client.call('bootstrap2', "big_map_to_self", [])
utils.bake(client, 'bootstrap5')
client.transfer(0, 'bootstrap2', "big_map_to_self", [])
utils.bake(client, 'bootstrap5')
@pytest.mark.contract
class TestNonRegression:
"""Test contract-related non-regressions"""
def test_issue_242_originate(self, client: Client, session: dict):
path = os.path.join(CONTRACT_PATH, 'non_regression', 'bug_262.tz')
originate(client, session, path, 'Unit', 1)
def test_issue_242_assert_balance(self, client: Client):
assert client.get_balance('bug_262') == 1
@pytest.mark.contract
class TestMiniScenarios:
"""Test mini scenarios"""
# replay.tz related tests
def test_replay_originate(self, client: Client, session: dict):
path = os.path.join(CONTRACT_PATH, 'mini_scenarios', 'replay.tz')
originate(client, session, path, 'Unit', 0)
def test_replay_transfer_fail(self, client: Client):
with utils.assert_run_failure("Internal operation replay attempt"):
client.transfer(10, "bootstrap1", "replay", [])
# create_contract.tz related tests
def test_create_contract_originate(self, client: Client, session: dict):
path = os.path.join(
CONTRACT_PATH, 'mini_scenarios', 'create_contract.tz'
)
originate(client, session, path, 'Unit', 1000)
def test_create_contract_balance(self, client: Client):
assert client.get_balance('create_contract') == 1000
def test_create_contract_perform_creation(self, client: Client):
transfer_result = client.transfer(
0,
"bootstrap1",
"create_contract",
['-arg', 'None', '--burn-cap', '10'],
)
utils.bake(client, 'bootstrap5')
pattern = r"New contract (\w*) originated"
match = re.search(pattern, transfer_result.client_output)
assert match is not None
kt_1 = match.groups()[0]
assert client.get_storage(kt_1) == '"abcdefg"'
assert client.get_balance(kt_1) == 100
assert client.get_balance('create_contract') == 900
# Originates a contract that when called, creates a contract with a
# rootname annotation. Such annotations comes in two flavors, thus the
# parameterization. Then calls the first contract and verifies the
# existence and type of the root entrypoint of the create contract.
@pytest.mark.parametrize(
"contract",
[
'create_contract_rootname.tz',
'create_contract_rootname_alt.tz',
],
)
def test_create_contract_rootname_originate(
self, client: Client, session: dict, contract
):
path = os.path.join(CONTRACT_PATH, 'opcodes', contract)
origination_res = originate(client, session, path, 'None', 1000)
transfer_result = client.transfer(
0,
"bootstrap1",
origination_res.contract,
['-arg', 'Unit', '--burn-cap', '10'],
)
utils.bake(client, 'bootstrap5')
pattern = r"New contract (\w*) originated"
match = re.search(pattern, transfer_result.client_output)
assert match is not None
kt_1 = match.groups()[0]
entrypoint_type = client.get_contract_entrypoint_type(
'root', kt_1
).entrypoint_type
assert entrypoint_type == 'unit', (
'the entrypoint root of the originated contract should exist '
'with type unit'
)
# default_account.tz related tests
def test_default_account_originate(self, client: Client, session: dict):
path = os.path.join(
CONTRACT_PATH, 'mini_scenarios', 'default_account.tz'
)
originate(client, session, path, 'Unit', 1000)
def test_default_account_transfer_then_bake(self, client: Client):
tz1 = IDENTITIES['bootstrap4']['identity']
client.transfer(
0,
"bootstrap1",
"default_account",
['-arg', f'"{tz1}"', '--burn-cap', '10'],
)
utils.bake(client, 'bootstrap5')
account = 'tz1SuakBpFdG9b4twyfrSMqZzruxhpMeSrE5'
client.transfer(
0,
"bootstrap1",
"default_account",
['-arg', f'"{account}"', '--burn-cap', '10'],
)
utils.bake(client, 'bootstrap5')
assert client.get_balance(account) == 100
# Test bytes, SHA252, CHECK_SIGNATURE
def test_reveal_signed_preimage_originate(
self, client: Client, session: dict
):
path = os.path.join(
CONTRACT_PATH, 'mini_scenarios', 'reveal_signed_preimage.tz'
)
byt = (
'0x9995c2ef7bcc7ae3bd15bdd9b02'
+ 'dc6e877c27b26732340d641a4cbc6524813bb'
)
sign = 'p2pk66uq221795tFxT7jfNmXtBMdjMf6RAaxRTwv1dbuSHbH6yfqGwz'
storage = f'(Pair {byt} "{sign}")'
originate(client, session, path, storage, 1000)
def test_wrong_preimage(self, client: Client):
byt = (
'0x050100000027566f756c657a2d766f75732'
+ '0636f75636865722061766563206d6f692c20636520736f6972'
)
sign = (
'p2sigvgDSBnN1bUsfwyMvqpJA1cFhE5s5oi7SetJ'
+ 'VQ6LJsbFrU2idPvnvwJhf5v9DhM9ZTX1euS9DgWozVw6BTHiK9VcQVpAU8'
)
arg = f'(Pair {byt} "{sign}")'
# We check failure of ASSERT_CMPEQ in the script.
with utils.assert_run_failure("At line 8 characters 9 to 21"):
client.transfer(
0,
"bootstrap1",
"reveal_signed_preimage",
['-arg', arg, '--burn-cap', '10'],
)
def test_wrong_signature(self, client: Client):
byt = (
'0x050100000027566f756c657a2d766f757320636'
+ 'f75636865722061766563206d6f692c20636520736f6972203f'
)
sign = (
'p2sigvgDSBnN1bUsfwyMvqpJA1cFhE5s5oi7SetJVQ6'
+ 'LJsbFrU2idPvnvwJhf5v9DhM9ZTX1euS9DgWozVw6BTHiK9VcQVpAU8'
)
arg = f'(Pair {byt} "{sign}")'
# We check failure of CHECK_SIGNATURE ; ASSERT in the script.
with utils.assert_run_failure("At line 15 characters 9 to 15"):
client.transfer(
0,
"bootstrap1",
"reveal_signed_preimage",
['-arg', arg, '--burn-cap', '10'],
)
def test_good_preimage_and_signature(self, client: Client):
byt = (
'0x050100000027566f756c657a2d766f757320636f7563'
+ '6865722061766563206d6f692c20636520736f6972203f'
)
sign = (
'p2sigsceCzcDw2AeYDzUonj4JT341WC9Px4wdhHBxbZcG1F'
+ 'hfqFVuG7f2fGCzrEHSAZgrsrQWpxduDPk9qZRgrpzwJnSHC3gZJ'
)
arg = f'(Pair {byt} "{sign}")'
client.transfer(
0,
"bootstrap1",
"reveal_signed_preimage",
['-arg', arg, '--burn-cap', '10'],
)
utils.bake(client, 'bootstrap5')
# Test vote_for_delegate
def test_vote_for_delegate_originate(self, client: Client, session: dict):
b_3 = IDENTITIES['bootstrap3']['identity']
b_4 = IDENTITIES['bootstrap4']['identity']
path = os.path.join(
CONTRACT_PATH, 'mini_scenarios', 'vote_for_delegate.tz'
)
storage = f'''(Pair (Pair "{b_3}" None) (Pair "{b_4}" None))'''
originate(client, session, path, storage, 1000)
assert client.get_delegate('vote_for_delegate').delegate is None
def test_vote_for_delegate_wrong_identity1(self, client: Client):
        # The script rejects a transfer from an identity that is not allowed to vote.
with utils.assert_run_failure("At line 15 characters 57 to 61"):
client.transfer(
0,
"bootstrap1",
"vote_for_delegate",
['-arg', 'None', '--burn-cap', '10'],
)
def test_vote_for_delegate_wrong_identity2(self, client: Client):
        # The script rejects a transfer from an identity that is not allowed to vote.
with utils.assert_run_failure("At line 15 characters 57 to 61"):
client.transfer(
0,
"bootstrap2",
"vote_for_delegate",
['-arg', 'None', '--burn-cap', '10'],
)
def test_vote_for_delegate_b3_vote_for_b5(self, client: Client):
b_5 = IDENTITIES['bootstrap5']['identity']
client.transfer(
0,
"bootstrap3",
"vote_for_delegate",
['-arg', f'(Some "{b_5}")', '--burn-cap', '10'],
)
utils.bake(client, 'bootstrap5')
storage = client.get_storage('vote_for_delegate')
assert re.search(b_5, storage)
def test_vote_for_delegate_still_no_delegate1(self, client: Client):
assert client.get_delegate('vote_for_delegate').delegate is None
def test_vote_for_delegate_b4_vote_for_b2(self, client: Client):
b_2 = IDENTITIES['bootstrap2']['identity']
client.transfer(
0,
"bootstrap4",
"vote_for_delegate",
['-arg', f'(Some "{b_2}")', '--burn-cap', '10'],
)
utils.bake(client, 'bootstrap5')
storage = client.get_storage('vote_for_delegate')
assert re.search(b_2, storage)
def test_vote_for_delegate_still_no_delegate2(self, client: Client):
assert client.get_delegate('vote_for_delegate').delegate is None
def test_vote_for_delegate_b4_vote_for_b5(self, client: Client):
b_5 = IDENTITIES['bootstrap5']['identity']
client.transfer(
0,
"bootstrap4",
"vote_for_delegate",
['-arg', f'(Some "{b_5}")', '--burn-cap', '10'],
)
utils.bake(client, 'bootstrap5')
storage = client.get_storage('vote_for_delegate')
assert re.search(b_5, storage)
def test_vote_for_delegate_has_delegate(self, client: Client):
b_5 = IDENTITIES['bootstrap5']['identity']
result = client.get_delegate('vote_for_delegate')
assert result.delegate == b_5
def test_multiple_entrypoints_counter(self, session: dict, client: Client):
path = os.path.join(
CONTRACT_PATH, 'mini_scenarios', 'multiple_entrypoints_counter.tz'
)
storage = 'None'
# originate contract
originate(client, session, path, storage, 0)
utils.bake(client, 'bootstrap5')
# call contract: creates the internal contract and calls it.
client.transfer(
0,
'bootstrap1',
'multiple_entrypoints_counter',
['--burn-cap', '10'],
)
utils.bake(client, 'bootstrap5')
assert client.get_storage('multiple_entrypoints_counter') == 'None', (
"The storage of the multiple_entrypoints_counter contract"
" should be None"
)
# Test CONTRACT with/without entrypoint annotation on literal address
# parameters with/without entrypoint annotation
def test_originate_simple_entrypoints(self, session: dict, client: Client):
"""originates the contract simple_entrypoints.tz
with entrypoint %A of type unit used in
test_simple_entrypoints"""
contract_target = os.path.join(
CONTRACT_PATH, 'entrypoints', 'simple_entrypoints.tz'
)
originate(client, session, contract_target, 'Unit', 0)
utils.bake(client, 'bootstrap5')
@pytest.mark.parametrize(
'contract_annotation, contract_type, param, expected_storage',
[
# tests passing adr to CONTRACT %A unit
# where adr has an entrypoint %A of type unit, is allowed.
('%A', 'unit', '"{adr}"', '(Some "{adr}%A")'),
('%B', 'string', '"{adr}"', '(Some "{adr}%B")'),
('%C', 'nat', '"{adr}"', '(Some "{adr}%C")'),
# tests passing adr%A to CONTRACT %A unit: redundant specification
# of entrypoint not allowed so CONTRACT returns None
('%A', 'unit', '"{adr}%A"', 'None'),
('%A', 'unit', '"{adr}%B"', 'None'),
('%A', 'unit', '"{adr}%D"', 'None'),
('%A', 'unit', '"{adr}%A"', 'None'),
('%B', 'unit', '"{adr}%A"', 'None'),
('%D', 'unit', '"{adr}%A"', 'None'),
# tests passing adr%A to CONTRACT unit:
# where adr has an entrypoint %A of type unit, is allowed.
('', 'unit', '"{adr}%A"', '(Some "{adr}%A")'),
('', 'string', '"{adr}%B"', '(Some "{adr}%B")'),
('', 'nat', '"{adr}%C"', '(Some "{adr}%C")'),
# tests passing adr%B to CONTRACT unit:
# as entrypoint %B of simple_entrypoints.tz has type string,
# CONTRACT will return None.
('', 'unit', '"{adr}%B"', 'None'),
# tests passing adr%D to CONTRACT unit:
# as entrypoint %D does not exist in simple_entrypoints.tz,
# CONTRACT will return None.
('', 'unit', '"{adr}%D"', 'None'),
# tests passing adr to CONTRACT unit:
# as adr does not have type unit, CONTRACT returns None.
('', 'unit', '"{adr}"', 'None'),
# entrypoint that does not exist
('%D', 'unit', '"{adr}"', 'None'),
# ill-typed entrypoints
('%A', 'int', '"{adr}"', 'None'),
('%B', 'unit', '"{adr}"', 'None'),
('%C', 'int', '"{adr}"', 'None'),
],
)
def test_simple_entrypoints(
self,
session,
client,
contract_annotation,
contract_type,
param,
expected_storage,
):
contract = f'''parameter address;
storage (option address);
code {{
CAR;
CONTRACT {contract_annotation} {contract_type};
IF_SOME {{ ADDRESS; SOME }} {{ NONE address; }};
NIL operation;
PAIR
}};'''
param = param.format(adr=session['contract'])
expected_storage = expected_storage.format(adr=session['contract'])
run_script_res = client.run_script(contract, 'None', param, file=False)
assert run_script_res.storage == expected_storage
@pytest.mark.contract
class TestComparables:
def test_comparable_unit(self, client):
client.typecheck_data('{}', '(set unit)')
client.typecheck_data('{Unit}', '(set unit)')
def test_comparable_options(self, client):
client.typecheck_data('{}', '(set (option nat))')
client.typecheck_data('{None; Some 1; Some 2}', '(set (option int))')
utils.assert_typecheck_data_failure(
client, '{Some "foo"; Some "bar"}', '(set (option string))'
)
utils.assert_typecheck_data_failure(
client, '{Some Unit; None}', '(set (option unit))'
)
def test_comparable_unions(self, client):
client.typecheck_data('{}', '(set (or unit bool))')
client.typecheck_data(
'{Left 3; Left 4; Right "bar"; Right "foo"}',
'(set (or nat string))',
)
utils.assert_typecheck_data_failure(
client, '{Left 2; Left 1}', '(set (or mutez unit))'
)
utils.assert_typecheck_data_failure(
client, '{Right True; Right False}', '(set (or unit bool))'
)
utils.assert_typecheck_data_failure(
client, '{Right 0; Left 1}', '(set (or nat nat))'
)
def test_comparable_pair(self, client: Client):
# tests that comb pairs are comparable and that the order is the
# expected one
client.typecheck_data('{}', '(set (pair nat string))')
client.typecheck_data('{Pair 0 "foo"}', '(set (pair nat string))')
client.typecheck_data(
'{Pair 0 "foo"; Pair 1 "bar"}', '(set (pair nat string))'
)
client.typecheck_data(
'{Pair 0 "bar"; Pair 0 "foo"; \
Pair 1 "bar"; Pair 1 "foo"}',
'(set (pair nat string))',
)
client.typecheck_data('{}', '(set (pair nat (pair string bytes)))')
client.typecheck_data('{}', '(map (pair nat string) unit)')
client.typecheck_data(
'{Elt (Pair 0 "foo") Unit}', '(map (pair nat string) unit)'
)
client.typecheck_data(
'{Elt (Pair 0 "foo") Unit; \
Elt (Pair 1 "bar") Unit}',
'(map (pair nat string) unit)',
)
client.typecheck_data(
'{Elt (Pair 0 "bar") Unit; \
Elt (Pair 0 "foo") Unit; \
Elt (Pair 1 "bar") Unit; \
Elt (Pair 1 "foo") Unit}',
'(map (pair nat string) unit)',
)
client.typecheck_data('{}', '(map (pair nat (pair string bytes)) unit)')
client.typecheck_data('{}', '(big_map (pair nat string) unit)')
client.typecheck_data(
'{Elt (Pair 0 "foo") Unit}', '(big_map (pair nat string) unit)'
)
client.typecheck_data(
'{Elt (Pair 0 "foo") Unit; \
Elt (Pair 1 "bar") Unit}',
'(big_map (pair nat string) unit)',
)
client.typecheck_data(
'{Elt (Pair 0 "bar") Unit; \
Elt (Pair 0 "foo") Unit; \
Elt (Pair 1 "bar") Unit; \
Elt (Pair 1 "foo") Unit}',
'(big_map (pair nat string) unit)',
)
client.typecheck_data(
'{}', '(big_map (pair nat (pair string bytes)) unit)'
)
client.typecheck_data('{}', '(set (pair (pair nat nat) nat))')
client.typecheck_data(
'{}',
'(set (pair (pair int nat) \
(pair bool bytes)))',
)
def test_order_of_pairs(self, client: Client):
# tests that badly-ordered set literals are rejected
utils.assert_typecheck_data_failure(
client, '{Pair 0 "foo"; Pair 0 "bar"}', '(set (pair nat string))'
)
utils.assert_typecheck_data_failure(
client, '{Pair 1 "bar"; Pair 0 "foo"}', '(set (pair nat string))'
)
def test_comparable_chain_id(self, client):
client.typecheck_data('{}', '(set chain_id)')
chain1 = client.rpc('get', 'chains/main/chain_id')
chain2 = 'NetXZVhNXbDTx5M'
utils.assert_typecheck_data_failure(
client,
'{"' + f'{chain1}' + '"; "' + f'{chain2}' + '"}',
'(set chain_id)',
)
client.typecheck_data(
'{"' + f'{chain2}' + '"; "' + f'{chain1}' + '"}', '(set chain_id)'
)
def test_comparable_signature(self, client):
client.typecheck_data('{}', '(set signature)')
packed = client.pack('Unit', 'unit')
sig1 = client.sign_bytes_of_string(packed, "bootstrap1")
sig2 = client.sign_bytes_of_string(packed, "bootstrap2")
utils.assert_typecheck_data_failure(
client,
'{"' + f'{sig1}' + '"; "' + f'{sig2}' + '"}',
'(set signature)',
)
client.typecheck_data(
'{"' + f'{sig2}' + '"; "' + f'{sig1}' + '"}', '(set signature)'
)
def test_comparable_key(self, client):
pubkey1 = IDENTITIES['bootstrap1']['public']
pubkey2 = IDENTITIES['bootstrap2']['public']
client.typecheck_data('{}', '(set key)')
utils.assert_typecheck_data_failure(
client,
'{"' + f'{pubkey1}' + '"; "' + f'{pubkey2}' + '"}',
'(set key)',
)
client.typecheck_data(
'{"' + f'{pubkey2}' + '"; "' + f'{pubkey1}' + '"}', '(set key)'
)
def test_comparable_key_different_schemes(self, client):
client.gen_key('sk1', ['--sig', 'ed25519'])
key1 = client.show_address('sk1').public_key
client.gen_key('sk2', ['--sig', 'secp256k1'])
key2 = client.show_address('sk2').public_key
client.gen_key('sk3', ['--sig', 'p256'])
key3 = client.show_address('sk3').public_key
# Three public keys of the three different signature schemes, ordered
client.typecheck_data(
'{"' + key1 + '"; "' + key2 + '"; "' + key3 + '"}', '(set key)'
)
# Test all orderings that do not respect the comparable order
utils.assert_typecheck_data_failure(
client,
'{"' + key1 + '"; "' + key3 + '"; "' + key2 + '"}',
'(set key)',
)
utils.assert_typecheck_data_failure(
client,
'{"' + key2 + '"; "' + key1 + '"; "' + key3 + '"}',
'(set key)',
)
utils.assert_typecheck_data_failure(
client,
'{"' + key2 + '"; "' + key3 + '"; "' + key1 + '"}',
'(set key)',
)
utils.assert_typecheck_data_failure(
client,
'{"' + key3 + '"; "' + key1 + '"; "' + key2 + '"}',
'(set key)',
)
utils.assert_typecheck_data_failure(
client,
'{"' + key3 + '"; "' + key2 + '"; "' + key1 + '"}',
'(set key)',
)
@pytest.mark.contract
class TestTypecheckingErrors:
def test_big_map_arity_error(self, client: Client):
error_pattern = (
'primitive EMPTY_BIG_MAP expects 2 arguments but is given 1.'
)
with utils.assert_run_failure(error_pattern):
client.typecheck(
os.path.join(CONTRACT_PATH, 'ill_typed', 'big_map_arity.tz')
)
BAD_ANNOT_TEST = '''
parameter bytes;
storage (option (lambda unit unit));
code { CAR; UNPACK (lambda unit unit); NIL operation; PAIR}
'''
@pytest.mark.contract
class TestBadAnnotation:
def test_write_contract_bad_annot(self, tmpdir, session: dict):
name = 'bad_annot.tz'
contract = f'{tmpdir}/{name}'
script = BAD_ANNOT_TEST
with open(contract, 'w') as contract_file:
contract_file.write(script)
session[name] = contract
def test_bad_annotation(self, client: Client, session: dict):
name = 'bad_annot.tz'
contract = session[name]
# This was produced by running "tezos-client hash data '{ UNIT
# ; PAIR ; CAR %faa }' of type 'lambda unit unit'" and
        # replacing the last two bytes (which correspond to the two
        # 'a's at the end of the annotation) with the 0xff byte, which is
        # not a valid UTF-8 encoding of a string
parameter = '0x05020000000e034f03420416000000042566ffff'
res = client.run_script(contract, 'None', parameter)
assert res.storage == 'None'
@pytest.mark.contract
class TestOrderInTopLevelDoesNotMatter:
@pytest.fixture
def contract_splitted_in_top_level_elements(self):
return [
"parameter nat",
"storage unit",
"code { CDR; NIL operation; PAIR }",
]
def test_shuffle(
self, client: Client, contract_splitted_in_top_level_elements
):
"""
Test that the storage, code, and parameter sections can appear in any
order in a contract script.
"""
for shuffled_list in itertools.permutations(
contract_splitted_in_top_level_elements
):
contract = ";\n".join(shuffled_list)
client.typecheck(contract, file=False)
@pytest.mark.contract
@pytest.mark.regression
class TestSelfAddressTransfer:
def test_self_address_originate_sender(
self, client_regtest_scrubbed, session
):
client = client_regtest_scrubbed
path = os.path.join(
CONTRACT_PATH, 'mini_scenarios', 'self_address_sender.tz'
)
originate(client, session, path, 'Unit', 0)
def test_self_address_originate_receiver(
self, client_regtest_scrubbed, session
):
client = client_regtest_scrubbed
path = os.path.join(
CONTRACT_PATH, 'mini_scenarios', 'self_address_receiver.tz'
)
originate(client, session, path, 'Unit', 0)
session['receiver_address'] = session['contract']
def test_send_self_address(self, client_regtest_scrubbed, session):
client = client_regtest_scrubbed
receiver_address = session['receiver_address']
client.transfer(
0,
'bootstrap2',
'self_address_sender',
['--arg', f'"{receiver_address}"', '--burn-cap', '2'],
)
utils.bake(client, 'bootstrap5')
@pytest.mark.slow
@pytest.mark.contract
@pytest.mark.regression
class TestScriptHashRegression:
@pytest.mark.parametrize("contract", all_contracts())
def test_contract_hash(self, client_regtest: Client, contract):
client = client_regtest
assert contract.endswith(
'.tz'
), "test contract should have .tz extension"
client.hash_script(os.path.join(CONTRACT_PATH, contract))
@pytest.mark.contract
class TestScriptHashOrigination:
def test_contract_hash_with_origination(
self, client: Client, session: dict
):
script = 'parameter unit; storage unit; code {CAR; NIL operation; PAIR}'
originate(
client,
session,
contract=script,
init_storage='Unit',
amount=1000,
contract_name='dummy_contract',
)
hash1 = client.hash_script(script)
hash2 = client.get_script_hash('dummy_contract')
assert hash1 == hash2
@pytest.mark.contract
@pytest.mark.regression
class TestNormalize:
"""Regression tests for the "normalize data" command."""
modes = [None, 'Readable', 'Optimized', 'Optimized_legacy']
@pytest.mark.parametrize('mode', modes)
def test_normalize_unparsing_mode(self, client_regtest_scrubbed, mode):
client = client_regtest_scrubbed
input_data = (
'{Pair 0 3 6 9; Pair 1 (Pair 4 (Pair 7 10)); {2; 5; 8; 11}}'
)
input_type = 'list (pair nat nat nat nat)'
client.normalize(input_data, input_type, mode=mode)
def test_normalize_legacy_flag(self, client_regtest_scrubbed):
client = client_regtest_scrubbed
input_data = '{Elt %a 0 1}'
input_type = 'map nat nat'
client.normalize(input_data, input_type, legacy=True)
error_pattern = 'unexpected annotation.'
with utils.assert_run_failure(error_pattern):
client.normalize(input_data, input_type, legacy=False)
@pytest.mark.parametrize('mode', modes)
def test_normalize_script(self, client_regtest_scrubbed, mode):
client = client_regtest_scrubbed
path = os.path.join(CONTRACT_PATH, 'opcodes', 'comb-literals.tz')
client.normalize_script(path, mode=mode)
types = [
'nat',
'list nat',
'pair nat int',
'list (pair nat int)',
'pair nat int bool',
'list (pair nat int bool)',
'pair nat int bool bytes',
'list (pair nat int bool bytes)',
]
@pytest.mark.parametrize('typ', types)
def test_normalize_type(self, client_regtest_scrubbed, typ):
client = client_regtest_scrubbed
client.normalize_type(typ)
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterable, List, Optional, Tuple, TypeVar
import numpy as np
from numpy import ndarray as array
from paddleaudio.backends import depth_convert
from paddleaudio.utils import ParameterError
__all__ = [
'depth_augment',
'spect_augment',
'random_crop1d',
'random_crop2d',
'adaptive_spect_augment',
]
def randint(high: int) -> int:
"""Generate one random integer in range [0 high)
This is a helper function for random data augmentaiton
"""
return int(np.random.randint(0, high=high))
def rand() -> float:
"""Generate one floating-point number in range [0 1)
This is a helper function for random data augmentaiton
"""
return float(np.random.rand(1))
def depth_augment(y: array,
choices: List = ['int8', 'int16'],
probs: List[float] = [0.5, 0.5]) -> array:
""" Audio depth augmentation
Do audio depth augmentation to simulate the distortion brought by quantization.
"""
assert len(probs) == len(
choices
), 'number of choices {} must be equal to size of probs {}'.format(
len(choices), len(probs))
depth = np.random.choice(choices, p=probs)
src_depth = y.dtype
y1 = depth_convert(y, depth)
y2 = depth_convert(y1, src_depth)
return y2
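# Hedged usage sketch (the waveform `y` below is illustrative, not part of this
# module): depth_augment converts the signal to a random lower bit depth and
# back, so dtype and shape are preserved but quantization noise is added. It
# assumes depth_convert supports the float32 <-> integer round trip.
#
#   y = np.random.uniform(-1.0, 1.0, 16000).astype('float32')
#   y_aug = depth_augment(y, choices=['int8', 'int16'], probs=[0.5, 0.5])
#   assert y_aug.dtype == y.dtype and y_aug.shape == y.shape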
def adaptive_spect_augment(spect: array,
tempo_axis: int = 0,
level: float = 0.1) -> array:
"""Do adpative spectrogram augmentation
The level of the augmentation is gowern by the paramter level,
ranging from 0 to 1, with 0 represents no augmentation。
"""
assert spect.ndim == 2., 'only supports 2d tensor or numpy array'
if tempo_axis == 0:
nt, nf = spect.shape
else:
nf, nt = spect.shape
time_mask_width = int(nt * level * 0.5)
freq_mask_width = int(nf * level * 0.5)
num_time_mask = int(10 * level)
num_freq_mask = int(10 * level)
if tempo_axis == 0:
for _ in range(num_time_mask):
start = randint(nt - time_mask_width)
spect[start:start + time_mask_width, :] = 0
for _ in range(num_freq_mask):
start = randint(nf - freq_mask_width)
spect[:, start:start + freq_mask_width] = 0
else:
for _ in range(num_time_mask):
start = randint(nt - time_mask_width)
spect[:, start:start + time_mask_width] = 0
for _ in range(num_freq_mask):
start = randint(nf - freq_mask_width)
spect[start:start + freq_mask_width, :] = 0
return spect
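# Hedged sketch of adaptive_spect_augment on a (time, freq) spectrogram (the
# array below is illustrative): with level=0.1 roughly one time mask and one
# frequency mask, each about 5% of the corresponding axis, are zeroed in place.
#
#   spect = np.random.rand(100, 80).astype('float32')
#   spect = adaptive_spect_augment(spect, tempo_axis=0, level=0.1)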
def spect_augment(spect: array,
tempo_axis: int = 0,
max_time_mask: int = 3,
max_freq_mask: int = 3,
max_time_mask_width: int = 30,
max_freq_mask_width: int = 20) -> array:
"""Do spectrogram augmentation in both time and freq axis
Reference:
"""
assert spect.ndim == 2., 'only supports 2d tensor or numpy array'
if tempo_axis == 0:
nt, nf = spect.shape
else:
nf, nt = spect.shape
num_time_mask = randint(max_time_mask)
num_freq_mask = randint(max_freq_mask)
time_mask_width = randint(max_time_mask_width)
freq_mask_width = randint(max_freq_mask_width)
if tempo_axis == 0:
for _ in range(num_time_mask):
start = randint(nt - time_mask_width)
spect[start:start + time_mask_width, :] = 0
for _ in range(num_freq_mask):
start = randint(nf - freq_mask_width)
spect[:, start:start + freq_mask_width] = 0
else:
for _ in range(num_time_mask):
start = randint(nt - time_mask_width)
spect[:, start:start + time_mask_width] = 0
for _ in range(num_freq_mask):
start = randint(nf - freq_mask_width)
spect[start:start + freq_mask_width, :] = 0
return spect
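# Hedged sketch of spect_augment (illustrative array): the number and width of
# the time/frequency masks are themselves sampled, so repeated calls apply a
# different masking pattern each time.
#
#   spect = np.random.rand(200, 64).astype('float32')
#   spect = spect_augment(spect, tempo_axis=0, max_time_mask=3, max_freq_mask=3)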
def random_crop1d(y: array, crop_len: int) -> array:
""" Do random cropping on 1d input signal
The input is a 1d signal, typically a sound waveform
"""
    if y.ndim != 1:
        raise ParameterError('only accept 1d tensor or numpy array')
n = len(y)
idx = randint(n - crop_len)
return y[idx:idx + crop_len]
def random_crop2d(s: array, crop_len: int, tempo_axis: int = 0) -> array:
""" Do random cropping for 2D array, typically a spectrogram.
The cropping is done in temporal direction on the time-freq input signal.
"""
if tempo_axis >= s.ndim:
raise ParameterError('axis out of range')
n = s.shape[tempo_axis]
idx = randint(high=n - crop_len)
sli = [slice(None) for i in range(s.ndim)]
sli[tempo_axis] = slice(idx, idx + crop_len)
out = s[tuple(sli)]
return out
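# Hedged usage sketch for the cropping helpers (shapes are illustrative):
#
#   wav = np.random.randn(16000).astype('float32')
#   wav_crop = random_crop1d(wav, crop_len=8000)                  # shape (8000,)
#   spec = np.random.rand(300, 64).astype('float32')
#   spec_crop = random_crop2d(spec, crop_len=100, tempo_axis=0)   # shape (100, 64)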
|
import ast
import importlib
import os
import logging
import logging.config
import sys
from flask import Flask, Blueprint
from flask_restful import Api
from flask_cors import CORS
from typing import Dict, Any # noqa: F401
from flasgger import Swagger
from search_service.api.dashboard import SearchDashboardAPI
from search_service.api.table import SearchTableAPI, SearchTableFilterAPI
from search_service.api.user import SearchUserAPI
from search_service.api.document import DocumentUserAPI, DocumentTableAPI, DocumentTablesAPI, DocumentUsersAPI
from search_service.api.healthcheck import healthcheck
# For customized flask use below arguments to override.
FLASK_APP_MODULE_NAME = os.getenv('FLASK_APP_MODULE_NAME')
FLASK_APP_CLASS_NAME = os.getenv('FLASK_APP_CLASS_NAME')
FLASK_APP_KWARGS_DICT_STR = os.getenv('FLASK_APP_KWARGS_DICT')
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Environment Variable to enable cors
CORS_ENABLED = os.environ.get('CORS_ENABLED', False)
def create_app(*, config_module_class: str) -> Flask:
"""
    Creates the app inside a function so that Flask and its extensions can be
    initialized with a specific config. The API routes are also defined here,
    in one place, separate from their implementation.
    The config is fetched via a module.class name which can be passed through
    an environment variable, so the Config class is resolved from the runtime
    PYTHONPATH and can easily be injected.
More on: http://flask.pocoo.org/docs/1.0/config/
:param config_module_class: name of the config
:return: Flask
"""
if FLASK_APP_MODULE_NAME and FLASK_APP_CLASS_NAME:
print(f'Using requested Flask module {FLASK_APP_MODULE_NAME} '
f'and class {FLASK_APP_CLASS_NAME}', file=sys.stderr)
class_obj = getattr(
importlib.import_module(FLASK_APP_MODULE_NAME),
FLASK_APP_CLASS_NAME
)
flask_kwargs_dict = {} # type: Dict[str, Any]
if FLASK_APP_KWARGS_DICT_STR:
print(f'Using kwargs {FLASK_APP_KWARGS_DICT_STR} to instantiate Flask',
file=sys.stderr)
flask_kwargs_dict = ast.literal_eval(FLASK_APP_KWARGS_DICT_STR)
app = class_obj(__name__, **flask_kwargs_dict)
else:
app = Flask(__name__)
if CORS_ENABLED:
CORS(app)
config_module_class = \
os.getenv('SEARCH_SVC_CONFIG_MODULE_CLASS') or config_module_class
app.config.from_object(config_module_class)
if app.config.get('LOG_CONFIG_FILE'):
logging.config.fileConfig(app.config.get('LOG_CONFIG_FILE'), disable_existing_loggers=False)
else:
logging.basicConfig(format=app.config.get('LOG_FORMAT'), datefmt=app.config.get('LOG_DATE_FORMAT'))
logging.getLogger().setLevel(app.config.get('LOG_LEVEL'))
logging.info('Creating app with config name {}'
.format(config_module_class))
logging.info('Created app with config name {}'.format(config_module_class))
api_bp = Blueprint('api', __name__)
api_bp.add_url_rule('/healthcheck', 'healthcheck', healthcheck)
api = Api(api_bp)
# Table Search API
# TODO: Rename endpoint to be more generic and accept a resource type so that logic can be re-used
api.add_resource(SearchTableFilterAPI, '/search_table')
api.add_resource(SearchTableAPI, '/search')
# User Search API
api.add_resource(SearchUserAPI, '/search_user')
# Dashboard Search API
api.add_resource(SearchDashboardAPI, '/search_dashboard')
# DocumentAPI
api.add_resource(DocumentTablesAPI, '/document_table')
api.add_resource(DocumentTableAPI, '/document_table/<document_id>')
api.add_resource(DocumentUsersAPI, '/document_user')
api.add_resource(DocumentUserAPI, '/document_user/<document_id>')
app.register_blueprint(api_bp)
if app.config.get('SWAGGER_ENABLED'):
Swagger(app, template_file=os.path.join(ROOT_DIR, app.config.get('SWAGGER_TEMPLATE_PATH')), parse=True)
return app
|
"""SampleProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from MyApp import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
    url(r'^idealweight/', views.IdealWeight),
]
|
from django.contrib import admin
from .. import models
@admin.register(models.Problem)
class ProblemeAdmin(admin.ModelAdmin):
"""
    Problem management
"""
list_display = ['problem_name', 'limit_time', 'limit_memory', 'scoring_type', 'level', 'info', 'is_open', 'checker_code']
class Meta:
model = models.Problem
@admin.register(models.ProblemSet)
class ProblemSetAdmin(admin.ModelAdmin):
"""
    Problem set management
"""
list_display = ['set_name', 'editor', 'message']
class Meta:
model = models.ProblemSet
@admin.register(models.ProblemList)
class ProblemListAdmin(admin.ModelAdmin):
"""
    Management of problems within a problem set
"""
list_display = ['problem_set', 'problem']
class Meta:
model = models.ProblemList
@admin.register(models.TestCase)
class TestCaseAdmin(admin.ModelAdmin):
"""
    Test case management
"""
list_display = ['id', 'problem']
class Meta:
model = models.TestCase
|
##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from help@hdfgroup.org. #
##############################################################################
import unittest
import sys
sys.path.append('../../hsds/util')
sys.path.append('../../hsds')
from dsetUtil import getHyperslabSelection, getSelectionShape
from dsetUtil import ItemIterator, getEvalStr
class DsetUtilTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(DsetUtilTest, self).__init__(*args, **kwargs)
def testGetHyperslabSelection(self):
# getHyperslabSelection(dsetshape, start, stop, step)
# 1-D case
datashape = [100,]
slices = getHyperslabSelection(datashape)
self.assertEqual(len(slices), 1)
self.assertEqual(slices[0], slice(0, 100, 1))
slices = getHyperslabSelection(datashape, 20)
self.assertEqual(len(slices), 1)
self.assertEqual(slices[0], slice(20, 100, 1))
slices = getHyperslabSelection(datashape, 20, 80)
self.assertEqual(len(slices), 1)
self.assertEqual(slices[0], slice(20, 80, 1))
slices = getHyperslabSelection(datashape, 20, 80, 2)
self.assertEqual(len(slices), 1)
self.assertEqual(slices[0], slice(20, 80, 2))
datashape = [100, 50]
slices = getHyperslabSelection(datashape)
self.assertEqual(len(slices), 2)
self.assertEqual(slices[0], slice(0, 100, 1))
self.assertEqual(slices[1], slice(0, 50, 1))
slices = getHyperslabSelection(datashape, (10, 20))
self.assertEqual(len(slices), 2)
self.assertEqual(slices[0], slice(10, 100, 1))
self.assertEqual(slices[1], slice(20, 50, 1))
slices = getHyperslabSelection(datashape, (10, 20), (90, 30))
self.assertEqual(len(slices), 2)
self.assertEqual(slices[0], slice(10, 90, 1))
self.assertEqual(slices[1], slice(20, 30, 1))
slices = getHyperslabSelection(datashape, (10, 20), (90, 30), (1,2))
self.assertEqual(len(slices), 2)
self.assertEqual(slices[0], slice(10, 90, 1))
self.assertEqual(slices[1], slice(20, 30, 2))
def testGetSelectionShape(self):
sel = [ slice(3,7,1), ]
shape = getSelectionShape(sel)
self.assertEqual(shape, [4,])
sel = [ slice(3,7,3), ] # select points 3, 6
shape = getSelectionShape(sel)
self.assertEqual(shape, [2,])
sel = [ slice(44,52,1), slice(48,52,1) ]
shape = getSelectionShape(sel)
self.assertEqual(shape, [8,4])
sel = [ slice(0, 4, 2), ] # select points 0, 2
shape = getSelectionShape(sel)
self.assertEqual(shape, [2,])
sel = [ slice(0, 5, 2), ] # select points 0, 2, 4
shape = getSelectionShape(sel)
self.assertEqual(shape, [3,])
def testGetEvalStr(self):
queries = { "date == 23": "rows['date'] == 23",
"wind == b'W 5'": "rows['wind'] == b'W 5'",
"temp > 61": "rows['temp'] > 61",
"(date >=22) & (date <= 24)": "(rows['date'] >=22) & (rows['date'] <= 24)",
"(date == 21) & (temp > 70)": "(rows['date'] == 21) & (rows['temp'] > 70)",
"(wind == b'E 7') | (wind == b'S 7')": "(rows['wind'] == b'E 7') | (rows['wind'] == b'S 7')" }
fields = ["date", "wind", "temp"]
for query in queries.keys():
eval_str = getEvalStr(query, "rows", fields)
self.assertEqual(eval_str, queries[query])
#print(query, "->", eval_str)
def testBadQuery(self):
queries = ( "foobar", # no variable used
"wind = b'abc", # non-closed literal
"(wind = b'N') & (temp = 32", # missing paren
"foobar > 42", # invalid field name
"import subprocess; subprocess.call(['ls', '/'])") # injection attack
fields = ("date", "wind", "temp" )
for query in queries:
try:
eval_str = getEvalStr(query, "x", fields)
self.assertTrue(False) # shouldn't get here
except Exception:
pass # ok
def testItemIterator(self):
# 1-D case
datashape = [10,]
slices = getHyperslabSelection(datashape)
it = ItemIterator(slices)
indices = []
count = 0
while True:
try:
index = it.next()
count += 1
indices.append(index)
except StopIteration:
break
self.assertEqual(count, 10)
self.assertEqual(indices, list(range(10)))
# 2-D case
datashape = [4, 5]
slices = getHyperslabSelection(datashape)
it = ItemIterator(slices)
indices = []
count = 0
while True:
try:
index = it.next()
                self.assertEqual(len(index), 2)
self.assertTrue(index[0] >= 0)
self.assertTrue(index[0] < 4)
self.assertTrue(index[1] >= 0)
self.assertTrue(index[1] < 5)
count += 1
indices.append(index)
except StopIteration:
break
self.assertEqual(count, 20)
if __name__ == '__main__':
#setup test files
unittest.main()
|
from datetime import timedelta
from functools import partial
import numpy as np
from hyperopt import fmin, space_eval, tpe
from fedot.core.data.data_split import train_test_data_setup
from fedot.core.log import Log
from fedot.core.pipelines.tuning.hyperparams import convert_params, get_node_params
from fedot.core.pipelines.tuning.tuner_interface import HyperoptTuner, _greater_is_better
MAX_METRIC_VALUE = 10e6
class PipelineTuner(HyperoptTuner):
"""
Class for hyperparameters optimization for all nodes simultaneously
"""
def __init__(self, pipeline, task, iterations=100,
timeout: timedelta = timedelta(minutes=5),
log: Log = None):
super().__init__(pipeline, task, iterations, timeout, log)
def tune_pipeline(self, input_data, loss_function, loss_params=None):
""" Function for hyperparameters tuning on the entire pipeline """
parameters_dict = self._get_parameters_for_tune(self.pipeline)
# Train test split
train_input, predict_input = train_test_data_setup(input_data)
test_target = np.array(predict_input.target)
is_need_to_maximize = _greater_is_better(target=test_target,
loss_function=loss_function,
loss_params=loss_params)
self.is_need_to_maximize = is_need_to_maximize
# Check source metrics for data
self.init_check(train_input, predict_input, test_target,
loss_function, loss_params)
best = fmin(partial(self._objective,
pipeline=self.pipeline,
train_input=train_input,
predict_input=predict_input,
test_target=test_target,
loss_function=loss_function,
loss_params=loss_params),
parameters_dict,
algo=tpe.suggest,
max_evals=self.iterations,
timeout=self.max_seconds)
best = space_eval(space=parameters_dict, hp_assignment=best)
tuned_pipeline = self.set_arg_pipeline(pipeline=self.pipeline,
parameters=best)
        # Validate that the optimization did well
final_pipeline = self.final_check(train_input=train_input,
predict_input=predict_input,
test_target=test_target,
tuned_pipeline=tuned_pipeline,
loss_function=loss_function,
loss_params=loss_params)
return final_pipeline
@staticmethod
def set_arg_pipeline(pipeline, parameters):
""" Method for parameters setting to a pipeline
:param pipeline: pipeline to which parameters should ba assigned
:param parameters: dictionary with parameters to set
:return pipeline: pipeline with new hyperparameters in each node
"""
# Set hyperparameters for every node
for node_id, _ in enumerate(pipeline.nodes):
node_params = parameters.get(node_id)
if node_params is not None:
# Delete all prefix strings to get appropriate parameters names
new_params = convert_params(node_params)
# Update parameters in nodes
pipeline.nodes[node_id].custom_params = new_params
return pipeline
@staticmethod
def _get_parameters_for_tune(pipeline):
"""
Function for defining the search space
:param pipeline: pipeline to optimize
:return parameters_dict: dictionary with operation names and parameters
"""
parameters_dict = {}
for node_id, node in enumerate(pipeline.nodes):
operation_name = str(node.operation)
# Assign unique prefix for each model hyperparameter
# label - number of node in the pipeline
node_params = get_node_params(node_id=node_id,
operation_name=operation_name)
parameters_dict.update({node_id: node_params})
return parameters_dict
def _objective(self, parameters_dict, pipeline, train_input, predict_input,
test_target, loss_function, loss_params: dict):
"""
Objective function for minimization / maximization problem
:param parameters_dict: dictionary with operation names and parameters
:param pipeline: pipeline to optimize
:param train_input: input for train pipeline model
:param predict_input: input for test pipeline model
:param test_target: target for validation
:param loss_function: loss function to optimize
:param loss_params: parameters for loss function
:return metric_value: value of objective function
"""
# Set hyperparameters for every node
pipeline = PipelineTuner.set_arg_pipeline(pipeline=pipeline, parameters=parameters_dict)
try:
metric_value = PipelineTuner.get_metric_value(train_input=train_input,
predict_input=predict_input,
test_target=test_target,
pipeline=pipeline,
loss_function=loss_function,
loss_params=loss_params)
except Exception:
if self.is_need_to_maximize is True:
metric_value = -MAX_METRIC_VALUE
else:
metric_value = MAX_METRIC_VALUE
if self.is_need_to_maximize is True:
return -metric_value
else:
return metric_value
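# Hedged usage sketch for PipelineTuner (the pipeline, task, data and metric
# objects below are assumptions, not defined in this module):
#
#   from sklearn.metrics import roc_auc_score
#
#   tuner = PipelineTuner(pipeline=pipeline, task=task, iterations=50,
#                         timeout=timedelta(minutes=5))
#   tuned_pipeline = tuner.tune_pipeline(input_data=train_data,
#                                        loss_function=roc_auc_score)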
|
"""
Ops for downsampling images.
Planned:
Pool, DownsampleAvg, DownsampleSoftmax.
"""
from __future__ import absolute_import, print_function, division
# This file should move along with conv.py
import warnings
import itertools
import numpy as np
from six.moves import xrange
import six.moves.builtins as builtins
import theano
from theano import gof, OpenMPOp, tensor, Variable, Apply
from theano.gof import ParamsType, EnumList
from theano.gradient import DisconnectedType
from theano.scalar import bool as bool_t
def max_pool_2d_same_size(input, patch_size):
"""
Takes as input a 4-D tensor. It sets all non maximum values
of non-overlapping patches of size (patch_size[0],patch_size[1]) to zero,
keeping only the maximum values. The output has the same dimensions as
the input.
Parameters
----------
input : 4-D theano tensor of input images
Input images. Max pooling will be done over the 2 last dimensions.
patch_size : tuple of length 2 or theano vector of ints of size 2.
Size of the patch (patch height, patch width).
(2,2) will retain only one non-zero value per patch of 4 values.
"""
output = Pool(True)(input, patch_size)
outs = MaxPoolGrad(True)(input, output, output, patch_size)
return outs
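# Hedged sketch of max_pool_2d_same_size (symbolic variables are illustrative):
# the output has the same shape as the input, with every value that is not the
# maximum of its 2x2 patch set to zero.
#
#   images = tensor.tensor4('images')
#   sparse_max = max_pool_2d_same_size(images, patch_size=(2, 2))
#   f = theano.function([images], sparse_max)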
def pool_2d(input, ws=None, ignore_border=None, stride=None, pad=(0, 0),
mode='max', ds=None, st=None, padding=None):
"""Downscale the input by a specified factor
Takes as input a N-D tensor, where N >= 2. It downscales the input image by
the specified factor, by keeping only the maximum value of non-overlapping
patches of size (ws[0],ws[1])
Parameters
----------
input : N-D theano tensor of input images
Input images. Max pooling will be done over the 2 last dimensions.
ws : tuple of length 2 or theano vector of ints of size 2.
Factor by which to downscale (vertical ws, horizontal ws).
(2,2) will halve the image in each dimension.
ignore_border : bool (default None, will print a warning and set to False)
When True, (5,5) input with ws=(2,2) will generate a (2,2) output.
(3,3) otherwise.
stride : tuple of two ints or theano vector of ints of size 2.
Stride size, which is the number of shifts over rows/cols to get the
next pool region. If stride is None, it is considered equal to ws
        (no overlap on pooling regions), e.g. stride=(1,1) will shift over
        one row and one col for every iteration.
pad : tuple of two ints or theano vector of ints of size 2.
(pad_h, pad_w), pad zeros to extend beyond four borders of the
images, pad_h is the size of the top and bottom margins, and
pad_w is the size of the left and right margins.
mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}
Operation executed on each window. `max` and `sum` always exclude
the padding in the computation. `average` gives you the choice to
include or exclude it.
ds
*deprecated*, use parameter ws instead.
st
*deprecated*, use parameter stride instead.
padding
*deprecated*, use parameter pad instead.
"""
# check for deprecated parameter names
if ds is not None:
if ws is not None:
raise ValueError(
"You can't provide a tuple value to both 'ws' and 'ds'."
" Please provide a value only to 'ws'."
)
else:
warnings.warn(
"DEPRECATION: the 'ds' parameter is not going to exist"
" anymore as it is going to be replaced by the parameter"
" 'ws'.",
stacklevel=2
)
ws = ds
elif ds is None and ws is None:
raise ValueError(
"You must provide a tuple value for the window size."
)
if st is not None:
if stride is not None:
raise ValueError(
"You can't provide a tuple value to both 'st and 'stride'."
" Please provide a value only to 'stride'."
)
else:
warnings.warn(
"DEPRECATION: the 'st' parameter is not going to exist"
" anymore as it is going to be replaced by the parameter"
" 'stride'.",
stacklevel=2
)
stride = st
if padding is not None:
if pad not in {None, (0, 0)}:
raise ValueError(
"You can't provide a tuple value to both 'padding' and pad."
" Please provide a value only to pad."
)
else:
warnings.warn(
"DEPRECATION: the 'padding' parameter is not going to exist"
" anymore as it is going to be replaced by the parameter"
" 'pad'.",
stacklevel=2
)
pad = padding
if input.ndim < 2:
raise NotImplementedError('pool_2d requires a dimension >= 2')
if ignore_border is None:
warnings.warn(
"pool_2d() will have the parameter ignore_border"
" default value changed to True (currently"
" False). To have consistent behavior with all Theano"
" version, explicitly add the parameter ignore_border=True."
" On the GPU, using ignore_border=True is needed to use cuDNN."
" When using ignore_border=False and not using cuDNN, the only"
" GPU combination supported is when"
" `ws == stride and pad == (0, 0) and mode == 'max'`."
" Otherwise, the convolution will be executed on CPU.",
stacklevel=2)
ignore_border = False
op = Pool(ignore_border, ndim=2, mode=mode)
output = op(input, ws, stride, pad)
return output
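# Hedged usage sketch for pool_2d (variable names are illustrative): a 4-D batch
# of images is downscaled by a factor of 2 along its last two dimensions.
#
#   x = tensor.tensor4('x')
#   pooled = pool_2d(x, ws=(2, 2), ignore_border=True)
#   f = theano.function([x], pooled)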
def pool_3d(input, ws=None, ignore_border=None, stride=None, pad=(0, 0, 0),
mode='max', ds=None, st=None, padding=None):
"""Downscale the input by a specified factor
Takes as input a N-D tensor, where N >= 3. It downscales the input image by
the specified factor, by keeping only the maximum value of non-overlapping
patches of size (ws[0],ws[1],ws[2])
Parameters
----------
input : N-D theano tensor of input images
Input images. Max pooling will be done over the 3 last dimensions.
ws : tuple of length 3 or theano vector of ints of size 3
Factor by which to downscale (vertical ws, horizontal ws, depth ws).
(2,2,2) will halve the image in each dimension.
ignore_border : bool (default None, will print a warning and set to False)
When True, (5,5,5) input with ws=(2,2,2) will generate a (2,2,2) output.
(3,3,3) otherwise.
    stride : tuple of three ints or theano vector of ints of size 3
        Stride size, which is the number of shifts over rows/cols/slices to get
        the next pool region. If stride is None, it is considered equal to ws
        (no overlap on pooling regions).
pad : tuple of two ints or theano vector of ints of size 3
(pad_h, pad_w, pad_d), pad zeros to extend beyond six borders of the
images, pad_h is the size of the top and bottom margins,
pad_w is the size of the left and right margins, and pad_d is the size
of the front and back margins
mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}
Operation executed on each window. `max` and `sum` always exclude
the padding in the computation. `average` gives you the choice to
include or exclude it.
ds
*deprecated*, use parameter ws instead.
st
        *deprecated*, use parameter stride instead.
padding
*deprecated*, use parameter pad instead.
"""
# check for deprecated parameter names
if ds is not None:
if ws is not None:
raise ValueError(
"You can't provide a tuple value to both 'ws' and 'ds'."
" Please provide a value only to 'ws'."
)
else:
warnings.warn(
"DEPRECATION: the 'ds' parameter is not going to exist"
" anymore as it is going to be replaced by the parameter"
" 'ws'.",
stacklevel=2
)
ws = ds
elif ds is None and ws is None:
raise ValueError(
"You must provide a tuple value for the window size."
)
if st is not None:
if stride is not None:
raise ValueError(
"You can't provide a tuple value to both 'st and 'stride'."
" Please provide a value only to 'stride'."
)
else:
warnings.warn(
"DEPRECATION: the 'st' parameter is not going to exist"
" anymore as it is going to be replaced by the parameter"
" 'stride'.",
stacklevel=2
)
stride = st
if padding is not None:
if pad not in {None, (0, 0, 0)}:
raise ValueError(
"You can't provide a tuple value to both 'padding' and pad."
" Please provide a value only to pad."
)
else:
warnings.warn(
"DEPRECATION: the 'padding' parameter is not going to exist"
" anymore as it is going to be replaced by the parameter"
" 'pad'.",
stacklevel=2
)
pad = padding
if input.ndim < 3:
raise NotImplementedError('pool_3d requires a dimension >= 3')
if ignore_border is None:
warnings.warn(
"pool_3d() will have the parameter ignore_border"
" default value changed to True (currently"
" False). To have consistent behavior with all Theano"
" version, explicitly add the parameter ignore_border=True."
" On the GPU, using ignore_border=True is needed to use cuDNN."
" When using ignore_border=False and not using cuDNN, the only"
" GPU combination supported is when"
" `ws == stride and pad == (0, 0, 0) and mode == 'max'`."
" Otherwise, the convolution will be executed on CPU.",
stacklevel=2)
ignore_border = False
op = Pool(ignore_border, ndim=3, mode=mode)
output = op(input, ws, stride, pad)
return output
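# Hedged sketch for pool_3d, analogous to pool_2d but pooling over the last
# three dimensions of a 5-D input such as (batch, channel, depth, height, width):
#
#   v = tensor.tensor5('v')
#   pooled3d = pool_3d(v, ws=(2, 2, 2), ignore_border=True)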
# NB: This enum type is currently used in gpuarray/pool.py.
# It may be used later as op param in this current file.
# Enum name and constants names are inspired from cuDNN type `cudnnPoolingMode_t`
# (cf. `theano/gpuarray/cudnn_defs.py`).
PoolingMode_t = EnumList(('POOLING_MAX', 'max'),
('POOLING_SUM', 'sum'),
('POOLING_AVERAGE_COUNT_INCLUDE_PADDING', 'average_inc_pad'),
('POOLING_AVERAGE_COUNT_EXCLUDE_PADDING', 'average_exc_pad'))
class Pool(OpenMPOp):
"""
    Max, sum or average over different patches.
Parameters
----------
ws : list or tuple of N ints
Downsample factor over rows, columns etc.
ws indicates the size of the pooling region.
ignore_border : bool
If ws doesn't divide imgshape, do we include an extra row/col/slice
of partial downsampling (False) or ignore it (True).
stride : list or tuple of N ints or None
Stride size, which is the number of shifts over rows/cols/slices to get the
next pool region. If stride is None, it is considered equal to ws
(no overlap on pooling regions).
pad : tuple of N ints or None
For each downsampling dimension, this specifies the number of zeros to
add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the
size of the top and bottom margins, pad_w specifies the size of the left and
right margins. No padding is added if pad is None.
mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}
        ('average_inc_pad' includes the padding in the count,
        'average_exc_pad' excludes it)
ndim : int
The number of pooling dimensions N.
The default is 2.
ds
*deprecated*, use parameter ws instead.
st
        *deprecated*, use parameter stride instead.
padding
*deprecated*, use parameter pad instead.
"""
__props__ = ('ignore_border', 'mode', 'ndim')
params_type = ParamsType(ignore_border=bool_t,)
@staticmethod
def out_shape(imgshape, ws=None, ignore_border=False, stride=None, pad=None,
ndim=2, ds=None, st=None, padding=None):
"""
Return the shape of the output from this op, for input of given
shape and flags.
Parameters
----------
imgshape : tuple, list, or similar of integer or scalar Theano variable
The shape of a tensor of images. The last N elements are
interpreted as the number of rows, and the number of cols.
ws : list or tuple of N ints
Downsample factor over rows and column.
ws indicates the pool region size.
ignore_border : bool
If ws doesn't divide imgshape, do we include an extra row/col/slice
of partial downsampling (False) or ignore it (True).
stride : list or tuple of N ints or None
Stride size, which is the number of shifts over rows/cols/slices to get the
next pool region. If stride is None, it is considered equal to ws
(no overlap on pooling regions).
pad : tuple of N ints or None
For each downsampling dimension, this specifies the number of zeros to
add as padding on both sides. For 2D and (pad_h, pad_w), pad_h specifies the
size of the top and bottom margins, pad_w specifies the size of the left and
right margins. No padding is added if pad is None.
ndim : int
The number of pooling dimensions N.
The default is 2.
ds
*deprecated*, use parameter ws instead.
st
        *deprecated*, use parameter stride instead.
padding
*deprecated*, use parameter pad instead.
Returns
-------
list
The shape of the output from this op, for input of given shape.
This will have the same length as imgshape, but with last N
elements reduced as per the downsampling & ignore_border flags.
"""
# check for deprecated parameter names
if ds is not None:
if ws is not None:
raise ValueError(
"You can't provide a tuple value to both 'ws' and 'ds'."
" Please provide a value only to 'ws'."
)
else:
warnings.warn(
"DEPRECATION: the 'ds' parameter is not going to exist"
" anymore as it is going to be replaced by the parameter"
" 'ws'.",
stacklevel=2
)
ws = ds
elif ds is None and ws is None:
raise ValueError(
"You must provide a tuple value for the window size."
)
if st is not None:
if stride is not None:
raise ValueError(
"You can't provide a tuple value to both 'st and 'stride'."
" Please provide a value only to 'stride'."
)
else:
warnings.warn(
"DEPRECATION: the 'st' parameter is not going to exist"
" anymore as it is going to be replaced by the parameter"
" 'stride'.",
stacklevel=2
)
stride = st
if padding is not None:
zero_pad = (0,) * ndim
if pad not in {None, zero_pad}:
raise ValueError(
"You can't provide a tuple value to both 'padding' and pad."
" Please provide a value only to pad."
)
else:
warnings.warn(
"DEPRECATION: the 'padding' parameter is not going to"
" exist anymore as it is going to be replaced by the"
" parameter 'pad'.",
stacklevel=2
)
pad = padding
if ndim is None:
ndim = 2
assert ndim > 0
if len(imgshape) < ndim:
raise TypeError('imgshape must have at least {} dimensions'.format(ndim))
if stride is None:
stride = ws
if pad is None:
pad = (0,) * ndim
patch_shape = tuple(tensor.extract_constant(imgshape[-ndim + i]) + pad[i] * 2
for i in xrange(ndim))
def compute_out(v, downsample, stride):
if ignore_border:
if downsample == stride:
return v // stride
else:
out = (v - downsample) // stride + 1
if isinstance(out, theano.Variable):
return tensor.maximum(out, 0)
else:
return np.maximum(out, 0)
else:
if isinstance(v, theano.Variable):
return tensor.switch(tensor.ge(stride, downsample),
(v - 1) // stride + 1,
tensor.maximum(0, (v - 1 - downsample) //
stride + 1) + 1)
elif stride >= downsample:
return (v - 1) // stride + 1
else:
return max(0, (v - 1 - downsample + stride) // stride) + 1
out_shape = [compute_out(patch_shape[i], ws[i], stride[i]) for i in xrange(ndim)]
rval = list(imgshape[:-ndim]) + out_shape
return rval
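    # Hedged worked example for out_shape (values are illustrative): with
    # ignore_border=True and stride defaulting to ws, each pooled dimension is
    # floor-divided by the window size, e.g.
    #   Pool.out_shape((8, 3, 64, 65), ws=(2, 2), ignore_border=True)
    #   -> [8, 3, 32, 32]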
def __init__(self, ignore_border=False, mode='max', ndim=2, openmp=None):
super(Pool, self).__init__(openmp=openmp)
self.ndim = ndim
self.ignore_border = ignore_border
if mode == 'max_deterministic':
            # It seems the max pool algorithm is already deterministic on CPU.
mode = 'max'
if mode not in ['max', 'average_inc_pad', 'average_exc_pad', 'sum']:
raise ValueError(
"Pool mode parameter only support 'max', 'sum',"
" 'average_inc_pad' and 'average_exc_pad'. Got %s" % mode)
self.mode = mode
def prepare_node(self, node, storage_map, compute_map, impl):
if len(node.inputs) == 1:
# Old interface
self.ndim = len(node.op.ds)
self.mode = node.op.mode
ws = theano.tensor.constant(node.op.ds)
st = theano.tensor.constant(node.op.st)
pad = theano.tensor.constant(node.op.padding)
node.inputs.append(ws)
node.inputs.append(st)
node.inputs.append(pad)
if isinstance(ws, theano.Constant):
storage_map[ws] = [ws.data]
compute_map[ws] = [True]
else:
storage_map[ws] = [None]
compute_map[ws] = [False]
if isinstance(st, theano.Constant):
storage_map[st] = [st.data]
compute_map[st] = [True]
else:
storage_map[st] = [None]
compute_map[st] = [False]
if isinstance(pad, theano.Constant):
storage_map[pad] = [pad.data]
compute_map[pad] = [True]
else:
storage_map[pad] = [None]
compute_map[pad] = [False]
def make_node(self, x, ws, stride=None, pad=None):
# TODO: consider restricting the dtype?
x = tensor.as_tensor_variable(x)
nd = self.ndim
if stride is None:
stride = ws
if pad is None:
pad = (0,) * nd
elif isinstance(pad, (tuple, list)):
if max(pad) != 0 and not self.ignore_border:
raise NotImplementedError(
'padding works only with ignore_border=True')
if isinstance(ws, (tuple, list)):
if any(pad[i] >= ws[i] for i in range(nd)):
raise NotImplementedError(
'padding must be smaller than strides')
ws = tensor.as_tensor_variable(ws)
stride = tensor.as_tensor_variable(stride)
pad = tensor.as_tensor_variable(pad)
assert ws.ndim == 1
assert stride.ndim == 1
assert pad.ndim == 1
if x.type.ndim < nd:
raise TypeError()
if ws.dtype not in tensor.int_dtypes:
raise TypeError('Pool downsample parameters must be ints.')
if stride.dtype not in tensor.int_dtypes:
raise TypeError('Stride parameters must be ints.')
if pad.dtype not in tensor.int_dtypes:
raise TypeError('Padding parameters must be ints.')
# If the input shape are broadcastable we can have 0 in the output shape
broad = x.broadcastable[:-nd] + (False,) * nd
out = tensor.TensorType(x.dtype, broad)
return gof.Apply(self, [x, ws, stride, pad], [out()])
def perform(self, node, inp, out, params):
x, ws, stride, pad = inp
z, = out
nd = self.ndim
assert ws.shape == stride.shape == pad.shape == (nd,)
if len(x.shape) < nd:
raise NotImplementedError(
'Pool requires input with {} or more dimensions'.format(nd))
z_shape = self.out_shape(x.shape, ws, params.ignore_border, stride, pad, nd)
if not params.ignore_border:
assert all(z > 0 for z in z_shape[-nd:])
if (z[0] is None) or (z[0].shape != z_shape):
z[0] = np.empty(z_shape, dtype=x.dtype)
zz = z[0]
# size of pooling output
pool_out_shp = zz.shape[-nd:]
img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))
inc_pad = self.mode == 'average_inc_pad'
# pad the image
if max(pad) != 0:
y = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
y[(slice(None),) * (len(x.shape) - nd) +
tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x
else:
y = x
func = np.max
if self.mode == 'sum':
func = np.sum
elif self.mode != 'max':
func = np.average
# precompute the region boundaries for each dimension
region_slices = [[] for i in xrange(nd)]
for i in xrange(nd):
for j in xrange(pool_out_shp[i]):
start = j * stride[i]
end = builtins.min(start + ws[i], img_shp[i])
if not inc_pad:
start = builtins.max(start, pad[i])
end = builtins.min(end, img_shp[i] - pad[i])
region_slices[i].append(slice(start, end))
# iterate over non-pooling dimensions
for k in np.ndindex(*x.shape[:-nd]):
zzk = zz[k]
yk = y[k]
# iterate over pooling regions
for r in np.ndindex(*pool_out_shp):
                zzk[r] = func(
                    yk[tuple(region_slices[i][r[i]] for i in xrange(nd))])
def infer_shape(self, node, in_shapes):
ws, stride, pad = [node.inputs[1], node.inputs[2], node.inputs[3]]
shp = self.out_shape(in_shapes[0], ws, self.ignore_border, stride,
pad, self.ndim)
return [shp]
def L_op(self, inputs, outputs, grads):
x, ws, stride, pad = inputs
gz, = grads
disc = [DisconnectedType()() for i in inputs[1:]]
if self.mode == 'max':
return [MaxPoolGrad(ndim=self.ndim,
ignore_border=self.ignore_border)(
x, outputs[0], gz, ws=ws, stride=stride, pad=pad)] + disc
else:
return [AveragePoolGrad(ndim=self.ndim,
ignore_border=self.ignore_border,
mode=self.mode)(
x, gz, ws=ws, stride=stride, pad=pad)] + disc
def connection_pattern(self, node):
return [[1], [0], [0], [0]]
def R_op(self, inputs, eval_points):
if self.mode != 'max':
# Rop for average or sum is simply pooling evaluated at eval point
eval_inputs = [eval_points[0]] + inputs[1:]
return [self(*eval_inputs)]
# R_op can receive None as eval_points.
        # That means there is no differentiable path through that input.
        # If this implies that you cannot compute some outputs,
# return None for those.
if eval_points[0] is None:
return [None]
z = self(*inputs)
x, ws, stride, pad = inputs
return [
DownsampleFactorMaxGradGrad(self.ignore_border, self.mode,
self.ndim)(x, z, eval_points[0], ws,
stride, pad)
]
def c_headers(self):
headers = ['<algorithm>']
headers += super(Pool, self).c_headers()
return headers
def c_code(self, node, name, inp, out, sub):
if self.mode not in ('max', 'sum', 'average_exc_pad', 'average_inc_pad'):
raise theano.gof.utils.MethodNotDefined()
x, ws, stride, pad = inp
z, = out
nd = self.ndim
total_ndim = node.inputs[0].ndim
non_pool_ndim = total_ndim - nd
fail = sub['fail']
params = sub['params']
if self.openmp:
# run in parallel over each pooling block
omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_idx, i_idx, o_idx, collector) schedule(static)'
else:
omp_parallel = ''
ccode = """
int typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);
if(PyArray_NDIM(%(x)s)!=%(total_ndim)s)
{
PyErr_SetString(PyExc_ValueError, "x must be a %(total_ndim)sD ndarray");
%(fail)s;
}
if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "ws must be a vector of size %(nd)s");
%(fail)s;
}
if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "stride must be a vector of size %(nd)s");
%(fail)s;
}
if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "pad must be a vector of size %(nd)s");
%(fail)s;
}
int z[%(nd)s]; // shape of the output
int r[%(nd)s]; // shape of the padded_input
int ws[%(nd)s];
int st[%(nd)s];
int pd[%(nd)s];
int nonzero_padding;
nonzero_padding = 0;
for (int i=0; i<%(nd)s; i++)
{
ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));
st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));
pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));
r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];
if (pd[i]>0)
nonzero_padding = 1;
}
if (!%(params)s->ignore_border && nonzero_padding)
{
PyErr_SetString(PyExc_ValueError,
"padding must be zero when ignore border is False");
%(fail)s;
}
if (%(params)s->ignore_border)
{
for (int i=0; i<%(nd)s; i++)
{
// '/' in C is different from '/' in python
if (r[i] - ws[i] < 0)
{
z[i] = 0;
}
else
{
z[i] = (r[i] - ws[i]) / st[i] + 1;
}
}
}
else
{
for (int i=0; i<%(nd)s; i++)
{
// decide how many rows/cols the output has
if (st[i] >= ws[i])
{
z[i] = (r[i] - 1) / st[i] + 1;
}
else
{
z[i] = std::max(0, (r[i] - 1 - ws[i] + st[i]) / st[i]) + 1;
}
assert(z[i] > 0);
}
}
// memory allocation of z if necessary
int mem_nec;
mem_nec = 0;
if ((!%(z)s) || *PyArray_DIMS(%(z)s)!=%(total_ndim)s)
{
mem_nec = 1;
}
if (!mem_nec)
{
for (int i=0; i<%(non_pool_ndim)s; i++)
{
if (PyArray_DIMS(%(z)s)[i] != PyArray_DIMS(%(x)s)[i])
{
mem_nec = 1;
break;
}
}
}
if (!mem_nec)
{
for (int i=0; i<%(nd)s; i++)
{
if (PyArray_DIMS(%(z)s)[%(non_pool_ndim)s + i] != z[i])
{
mem_nec = 1;
break;
}
}
}
if (mem_nec)
{
if (%(z)s) Py_XDECREF(%(z)s);
npy_intp dims[%(total_ndim)s];
for (int i=0; i<%(non_pool_ndim)s; i++)
{
dims[i] = PyArray_DIMS(%(x)s)[i];
}
for (int i=0; i<%(nd)s; i++)
{
dims[%(non_pool_ndim)s + i] = z[i];
}
//TODO: zeros not necessary
%(z)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, dims, typenum,0);
}
// initialize temp var for the value in a region
dtype_%(x)s collector;
int z_prod;
// do not run if any z[i] is zero
z_prod = 1;
for (int i=0; i<%(nd)s; i++)
{
z_prod *= z[i];
}
if (z_prod)
{
// will be used to hold start and end index of a region
int r_st[%(nd)s];
int r_end[%(nd)s];
// index for iterating over the pooling regions
int r_idx[%(nd)s];
// placeholder for PyArray indexing (output)
npy_intp o_idx[%(total_ndim)s];
// placeholder for PyArray indexing (input)
npy_intp i_idx[%(total_ndim)s];
// loop over non-pooling dimensions
int non_pooling_prod = 1;
for (int i=0; i<%(non_pool_ndim)s; i++)
{
non_pooling_prod *= PyArray_DIMS(%(x)s)[i];
}
%(omp_parallel)s
// first loop over non-pooling dimensions
for (int t=0; t<non_pooling_prod; t++)
{
// compute the non-pooling index in each dimension
if (%(non_pool_ndim)s!=0)
{
o_idx[0] = t;
i_idx[0] = t;
for (int i=1; i<%(non_pool_ndim)s; i++)
{
o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];
o_idx[i - 1] = o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];
i_idx[i] = o_idx[i];
i_idx[i - 1] = o_idx[i - 1];
}
}
// then loop over each region in each pooling dimension
"""
for i in xrange(nd):
ccode += """
for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {
r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];
r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];
// skip the padding
r_st[%(i)s] = r_st[%(i)s] < pd[%(i)s] ? pd[%(i)s] : r_st[%(i)s];
r_end[%(i)s] = r_end[%(i)s] > (r[%(i)s] - pd[%(i)s]) ? r[%(i)s] - pd[%(i)s] : r_end[%(i)s];
// from padded_img space to img space
r_st[%(i)s] -= pd[%(i)s];
r_end[%(i)s] -= pd[%(i)s];
// handle the case of no padding when ignore_border is True
if (%(params)s->ignore_border)
{
r_end[%(i)s] = r_end[%(i)s] > r[%(i)s] ? r[%(i)s] : r_end[%(i)s];
}
// use the index to find the correct position in the output
o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];
""" % dict(i=i, non_pool_ndim=non_pool_ndim, params=sub['params'])
ccode += """
// get a pointer to the correct position in the output
dtype_%(z)s * z;
if (%(total_ndim)s == 4)
z = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s, o_idx[0], o_idx[1], o_idx[2], o_idx[3])));
else
z = ((dtype_%(z)s*)(PyArray_GetPtr(%(z)s, o_idx)));
"""
if self.mode == 'max':
for i in xrange(nd):
ccode += """
// set the first index of dimension %(i)s
i_idx[%(non_pool_ndim)s + %(i)s] = r_st[%(i)s];
""" % dict(i=i, non_pool_ndim=non_pool_ndim)
ccode += """
// use the first element as the initial value of collector
if (%(total_ndim)s == 4)
collector = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
else
collector = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
"""
for i in xrange(nd):
ccode += """
// go through the pooled region in the unpadded input
for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
{
i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
""" % dict(i=i, non_pool_ndim=non_pool_ndim)
ccode += """
// update maximum
dtype_%(x)s a;
if (%(total_ndim)s == 4)
a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
else
a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
collector = (a > collector) ? a : collector;
"""
for i in xrange(nd):
ccode += """
} // for loop over region
"""
ccode += """
z[0] = collector;
"""
elif self.mode in ('sum', 'average_exc_pad', 'average_inc_pad'):
ccode += """
// initialize the sum at zero
collector = ((dtype_%(x)s)(0));
"""
for i in xrange(nd):
ccode += """
// go through the pooled region in the unpadded input
for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
{
i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
""" % dict(i=i, non_pool_ndim=non_pool_ndim)
ccode += """
// update sum
dtype_%(x)s a;
if (%(total_ndim)s == 4)
a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
else
a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
collector += a;
"""
for i in xrange(nd):
ccode += """
} // for loop over region
"""
if self.mode == "sum":
ccode += """
z[0] = collector;
"""
elif self.mode == 'average_inc_pad' and self.ignore_border:
# region size = product over all pooling dimensions
region_size = ' * '.join('ws[%d]' % i for i in xrange(nd))
ccode += """
z[0] = collector / (%(region_size)s);
""" % dict(region_size=region_size)
else:
# region size = number of elements in this region
region_size = ' * '.join('(r_end[%d]-r_st[%d])' % (i, i) for i in xrange(nd))
ccode += """
z[0] = collector / (%(region_size)s);
""" % dict(region_size=region_size)
for i in xrange(nd):
ccode += """
} // loop over pooling dimension
"""
ccode += """
} // for loop over non-pooling dimensions
} // if z_prod
"""
return ccode % locals()
def c_code_cache_version(self):
return (9, self.openmp)
class PoolGrad(OpenMPOp):
__props__ = ('ignore_border', 'mode', 'ndim')
@staticmethod
def out_shape(imgshape, ws=None, ignore_border=False, stride=None, pad=None, ndim=2,
ds=None, st=None, padding=None):
"""Return the shape of the output from this op, for input of given
shape and flags.
Parameters
----------
imgshape : tuple of integers or scalar Theano variables
the shape of a tensor of images. The last N elements are
interpreted as the downsampling dimensions.
ws : tuple of N ints
downsample factor over rows and columns. This parameter
indicates the size of the pooling region.
ignore_border : bool
If ws doesn't divide imgshape, do we include an extra row/col/slice
of partial downsampling (False) or ignore it (True).
stride : list or tuple of N ints or None
Stride size, which is the number of shifts over rows/cols/slices to get the
next pool region. If stride is None, it is considered equal to ws
(no overlap on pooling regions).
pad : tuple of N ints or None
For each downsampling dimension, this specifies the number of zeros to
add as padding on both sides. In 2D, with pad given as (pad_h, pad_w), pad_h
specifies the size of the top and bottom margins, and pad_w specifies the size
of the left and right margins. No padding is added if pad is None.
ndim : int
The number of pooling dimensions N.
The default is 2.
ds
*deprecated*, use parameter ws instead.
st
*deprecated*, use parameter stride instead.
padding
*deprecated*, use parameter pad instead.
Returns
-------
list :
the shape of the output from this op, for input of given
shape. This will have the same length as imgshape, but
with last N elements reduced as per the downsampling &
ignore_border flags.
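Examples
--------
A minimal illustrative sketch, assuming plain Python integers for the
shape (with symbolic inputs the entries are Theano variables, and with
integer inputs they may come back as NumPy scalars)::

    # 2D pooling of a (3, 32, 32) input with a 2x2 window, stride 2
    # and no padding: the last two dimensions shrink from 32 to 16.
    PoolGrad.out_shape((3, 32, 32), ws=(2, 2), ignore_border=True,
                       stride=(2, 2), pad=(0, 0), ndim=2)
    # -> [3, 16, 16]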
"""
# check for deprecated parameter names
if ds is not None:
if ws is not None:
raise ValueError(
"You can't provide a tuple value to both 'ws' and 'ds'."
" Please provide a value only to 'ws'."
)
else:
warnings.warn(
"DEPRECATION: the 'ds' parameter in PoolGrad is not going"
" to exist anymore as it is going to be replaced by the"
" parameter 'ws'.",
stacklevel=2
)
ws = ds
elif ds is None and ws is None:
raise ValueError(
"You must provide a tuple value for the window size."
)
if st is not None:
if stride is not None:
raise ValueError(
"You can't provide a tuple value to both 'st' and 'stride'."
" Please provide a value only to 'stride'."
)
else:
warnings.warn(
"DEPRECATION: the 'st' parameter in PoolGrad is not going"
" to exist anymore as it is going to be replaced by the"
" parameter 'stride'.",
stacklevel=2
)
stride = st
if padding is not None:
if pad is not None:
raise ValueError(
"You can't provide a tuple value to both 'padding' and 'pad'."
" Please provide a value only to 'pad'."
)
else:
warnings.warn(
"DEPRECATION: the 'padding' parameter in PoolGrad is not"
" going to exist anymore as it is going to be replaced"
" by the parameter 'pad'.",
stacklevel=2
)
pad = padding
if len(imgshape) < ndim:
raise TypeError('imgshape must have at least {} dimensions'.format(ndim))
if stride is None:
stride = ws
if pad is None:
pad = (0,) * ndim
patch_shape = tuple(tensor.extract_constant(imgshape[-ndim + i]) + pad[i] * 2
for i in xrange(ndim))
def compute_out(v, downsample, stride):
if ignore_border:
out = (v - downsample) // stride + 1
if isinstance(out, theano.Variable):
return tensor.maximum(out, 0)
else:
return np.maximum(out, 0)
else:
if isinstance(v, theano.Variable):
return tensor.switch(tensor.ge(stride, downsample),
(v - 1) // stride + 1,
tensor.maximum(0, (v - 1 - downsample) //
stride + 1) + 1)
elif stride >= downsample:
return (v - 1) // stride + 1
else:
return max(0, (v - 1 - downsample) // stride + 1) + 1
out_shape = [compute_out(patch_shape[i], ws[i], stride[i]) for i in xrange(ndim)]
rval = list(imgshape[:-ndim]) + out_shape
return rval
def __init__(self, ignore_border, mode='max', ndim=2, openmp=None):
self.ndim = ndim
self.ignore_border = ignore_border
if mode == 'max_deterministic':
# It seems max pool grad algo is already deterministic in CPU.
mode = 'max'
if mode not in ['max', 'sum', 'average_inc_pad', 'average_exc_pad']:
raise ValueError(
"Pool mode parameter only support 'max', 'sum',"
" 'average_inc_pad' and 'average_exc_pad'. Got %s" % mode)
self.mode = mode
super(PoolGrad, self).__init__(openmp=openmp)
def prepare_node(self, node, storage_map, compute_map, impl):
if len(node.inputs) < 5: # 5 for AveragePoolGrad, 6 for MaxPoolGrad
# Old interface
self.ndim = len(node.op.ds)
self.mode = node.op.mode
ws = theano.tensor.constant(node.op.ds)
st = theano.tensor.constant(node.op.st)
pad = theano.tensor.constant(node.op.padding)
node.inputs.append(ws)
node.inputs.append(st)
node.inputs.append(pad)
if isinstance(ws, theano.Constant):
storage_map[ws] = [ws.data]
compute_map[ws] = [True]
else:
storage_map[ws] = [None]
compute_map[ws] = [False]
if isinstance(st, theano.Constant):
storage_map[st] = [st.data]
compute_map[st] = [True]
else:
storage_map[st] = [None]
compute_map[st] = [False]
if isinstance(pad, theano.Constant):
storage_map[pad] = [pad.data]
compute_map[pad] = [True]
else:
storage_map[pad] = [None]
compute_map[pad] = [False]
def infer_shape(self, node, in_shapes):
return [in_shapes[0]]
class MaxPoolGrad(PoolGrad):
# no params_type needed: ignore_border does not change the C code
def __init__(self, ignore_border, ndim=2, openmp=None):
PoolGrad.__init__(self, ignore_border, mode='max', ndim=ndim, openmp=openmp)
def make_node(self, x, maxout, gz, ws, stride=None, pad=None):
# make_node should only be called by the grad function of
# Pool, so these asserts should not fail.
x = tensor.as_tensor_variable(x)
maxout = tensor.as_tensor_variable(maxout)
gz = tensor.as_tensor_variable(gz)
nd = self.ndim
if stride is None:
stride = ws
if pad is None:
pad = (0,) * nd
ws = tensor.as_tensor_variable(ws)
stride = tensor.as_tensor_variable(stride)
pad = tensor.as_tensor_variable(pad)
assert isinstance(x, Variable) and x.ndim >= nd
assert isinstance(maxout, Variable) and maxout.ndim >= nd
assert isinstance(gz, Variable) and gz.ndim >= nd
assert isinstance(ws, Variable) and ws.ndim == 1
assert isinstance(stride, Variable) and stride.ndim == 1
assert isinstance(pad, Variable) and pad.ndim == 1
assert x.ndim == maxout.ndim == gz.ndim >= nd
if ws.dtype not in tensor.int_dtypes:
raise TypeError('Pool downsample parameters must be ints.')
if stride.dtype not in tensor.int_dtypes:
raise TypeError('Stride parameters must be ints.')
if pad.dtype not in tensor.int_dtypes:
raise TypeError('Padding parameters must be ints.')
return Apply(self, [x, maxout, gz, ws, stride, pad], [x.type()])
def perform(self, node, inp, out):
assert self.mode == 'max'
x, maxout, gz, ws, stride, pad = inp
gx_stg, = out
nd = self.ndim
assert ws.shape == stride.shape == pad.shape == (nd,)
if len(x.shape) < nd:
raise NotImplementedError(
'MaxPoolGrad requires input with {} or more dimensions'.format(nd))
pool_out_shp = maxout.shape[-nd:]
img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))
# pad the image
if max(pad) != 0:
y = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
y[(slice(None),) * (len(x.shape) - nd) +
tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x
else:
y = x
gx = np.zeros_like(y)
# precompute the region boundaries for each dimension
region_ranges = [[] for i in xrange(nd)]
for i in xrange(nd):
for j in xrange(pool_out_shp[i]):
start = builtins.max(j * stride[i], pad[i])
end = builtins.min(start + ws[i], img_shp[i])
region_ranges[i].append(xrange(start, end))
# iterate over non-pooling dimensions
for k in np.ndindex(*x.shape[:-nd]):
gxk = gx[k]
gzk = gz[k]
yk = y[k]
maxoutk = maxout[k]
# iterate over pooling regions
for r in np.ndindex(*pool_out_shp):
maxout_value = maxoutk[r]
# iterate inside region
for c in itertools.product(*[region_ranges[i][r[i]]
for i in xrange(nd)]):
if maxout_value == yk[c]:
gxk[c] += gzk[r]
# unpad the image
gx = gx[(slice(None),) * (len(x.shape) - nd) +
tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))]
gx_stg[0] = gx
def grad(self, inp, grads):
x, maxout, gz, ws, stride, pad = inp
ggx, = grads
return ([theano.tensor.zeros_like(x),
theano.tensor.zeros_like(maxout),
DownsampleFactorMaxGradGrad(ndim=self.ndim,
ignore_border=self.ignore_border)(
x, maxout, ggx, ws, stride, pad)] +
[DisconnectedType()() for i in inp[3:]])
def connection_pattern(self, node):
return [[1], [1], [1], [0], [0], [0]]
def c_code(self, node, name, inp, out, sub):
assert self.mode == 'max'
x, z, gz, ws, stride, pad = inp
gx, = out
nd = self.ndim
total_ndim = node.inputs[0].ndim
non_pool_ndim = total_ndim - nd
fail = sub['fail']
if self.openmp:
# run in parallel over each pooling block
omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_idx, i_idx, o_idx, maximum) schedule(static)'
else:
omp_parallel = ''
ccode = """
// sanity checks
int x_typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);
int z_typenum = PyArray_ObjectType((PyObject*)%(z)s, 0);
int gz_typenum = PyArray_ObjectType((PyObject*)%(gz)s, 0);
if ((x_typenum != z_typenum) || (x_typenum != gz_typenum))
{
PyErr_SetString(PyExc_ValueError, "input types must all match");
%(fail)s;
}
if(PyArray_NDIM(%(x)s)!=%(total_ndim)s)
{
PyErr_SetString(PyExc_ValueError, "x must be a %(total_ndim)sD ndarray");
%(fail)s;
}
if(PyArray_NDIM(%(z)s)!=%(total_ndim)s)
{
PyErr_SetString(PyExc_ValueError, "z must be a %(total_ndim)sD ndarray");
%(fail)s;
}
if(PyArray_NDIM(%(gz)s)!=%(total_ndim)s)
{
PyErr_SetString(PyExc_ValueError, "gz must be a %(total_ndim)sD ndarray");
%(fail)s;
}
if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "ws must be a vector of size %(nd)s");
%(fail)s;
}
if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "stride must be a vector of size %(nd)s");
%(fail)s;
}
if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "pad must be a vector of size %(nd)s");
%(fail)s;
}
int z[%(nd)s]; // shape of the output
int r[%(nd)s]; // shape of the padded_input
int ws[%(nd)s];
int st[%(nd)s];
int pd[%(nd)s];
int nonzero_padding;
nonzero_padding = 0;
for (int i=0; i<%(nd)s; i++)
{
ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));
st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));
pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));
z[i] = PyArray_DIMS(%(z)s)[%(non_pool_ndim)s + i];
r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];
if (pd[i]>0)
nonzero_padding = 1;
}
// allocating memory for output, if necessary
int mem_nec;
mem_nec = 0;
if ((!%(gx)s) || !PyArray_ISCONTIGUOUS(%(gx)s)
|| *PyArray_DIMS(%(gx)s)!=%(total_ndim)s)
{
mem_nec = 1;
}
if (!mem_nec)
{
for (int i=0; i<%(total_ndim)s; i++)
{
if (PyArray_DIMS(%(gx)s)[i] != PyArray_DIMS(%(x)s)[i])
{
mem_nec = 1;
break;
}
}
}
if (mem_nec)
{
Py_XDECREF(%(gx)s);
%(gx)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, PyArray_DIMS(%(x)s), x_typenum,0);
}
else {
PyArray_FILLWBYTE(%(gx)s, 0);
}
dtype_%(z)s maximum; // temp var for maximum value in a region
int z_prod;
// do not run if any z[i] is zero
z_prod = 1;
for (int i=0; i<%(nd)s; i++)
{
z_prod *= z[i];
}
if (z_prod)
{
// will be used to hold start and end index of a region
int r_st[%(nd)s];
int r_end[%(nd)s];
// index for iterating over the pooling regions
int r_idx[%(nd)s];
// placeholder for PyArray indexing (output)
npy_intp o_idx[%(total_ndim)s];
// placeholder for PyArray indexing (input)
npy_intp i_idx[%(total_ndim)s];
// loop over non-pooling dimensions
int non_pooling_prod = 1;
for (int i=0; i<%(non_pool_ndim)s; i++)
{
non_pooling_prod *= PyArray_DIMS(%(x)s)[i];
}
%(omp_parallel)s
// first loop over non-pooling dimensions
for (int t=0; t<non_pooling_prod; t++)
{
// compute the non-pooling index in each dimension
if (%(non_pool_ndim)s!=0)
{
o_idx[0] = t;
i_idx[0] = t;
for (int i=1; i<%(non_pool_ndim)s; i++)
{
o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];
o_idx[i - 1] = o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];
i_idx[i] = o_idx[i];
i_idx[i - 1] = o_idx[i - 1];
}
}
// then loop over each region in each pooling dimension
"""
for i in xrange(nd):
ccode += """
for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {
r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];
r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];
// skip the padding
r_st[%(i)s] = r_st[%(i)s] < pd[%(i)s] ? pd[%(i)s] : r_st[%(i)s];
r_end[%(i)s] = r_end[%(i)s] > (r[%(i)s] - pd[%(i)s]) ? r[%(i)s] - pd[%(i)s] : r_end[%(i)s];
// from padded_img space to img space
r_st[%(i)s] -= pd[%(i)s];
r_end[%(i)s] -= pd[%(i)s];
// use the index to find the correct position in the output
o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];
""" % dict(i=i, non_pool_ndim=non_pool_ndim)
ccode += """
dtype_%(gz)s * gz;
if (%(total_ndim)s == 4)
{
// the maximum value
maximum = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s,o_idx[0],o_idx[1],o_idx[2],o_idx[3])))[0];
// the gradient corresponding to this maximum value in z
gz = ((dtype_%(gz)s*)(PyArray_GETPTR4(%(gz)s, o_idx[0],o_idx[1],o_idx[2],o_idx[3])));
}
else
{
// the maximum value
maximum = ((dtype_%(z)s*)(PyArray_GetPtr(%(z)s,o_idx)))[0];
// the gradient corresponding to this maximum value in z
gz = ((dtype_%(gz)s*)(PyArray_GetPtr(%(gz)s, o_idx)));
}
"""
for i in xrange(nd):
ccode += """
// go through the pooled region in the unpadded input
for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
{
i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
""" % dict(i=i, non_pool_ndim=non_pool_ndim)
ccode += """
dtype_%(x)s a;
dtype_%(gx)s * gx;
if (%(total_ndim)s == 4)
{
a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
gx = ((dtype_%(gx)s*)(PyArray_GETPTR4(%(gx)s, i_idx[0],i_idx[1],i_idx[2],i_idx[3])));
}
else
{
a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
gx = ((dtype_%(gx)s*)(PyArray_GetPtr(%(gx)s, i_idx)));
}
if (a == maximum){
gx[0] = gx[0] + gz[0];
}
"""
for i in xrange(nd):
ccode += """
} // for loop over region
"""
for i in xrange(nd):
ccode += """
} // loop over pooling dimension
"""
ccode += """
} // for loop over non-pooling dimensions
} // if z_prod
"""
return ccode % locals()
def c_code_cache_version(self):
return (0, 10, self.openmp)
class AveragePoolGrad(PoolGrad):
# ignore_border is used by perform, but not by the C code, so it is not needed in params_type
def __init__(self, ignore_border, mode='average_inc_pad', ndim=2):
assert mode in ['sum', 'average_inc_pad', 'average_exc_pad']
PoolGrad.__init__(self, ignore_border, mode, ndim)
# There is an extra dummy parameter to match the parameter count
# of MaxPoolGrad. They have to keep the same interface because of
# the DownsampleFactorMaxGrad trick to keep old scripts working
# (see downsample.py for details on this).
def make_node(self, x, gz, ws, stride=None, pad=None, dummy=None):
# make_node should only be called by the grad function of
# Pool, so these asserts should not fail.
x = tensor.as_tensor_variable(x)
gz = tensor.as_tensor_variable(gz)
nd = self.ndim
if stride is None:
stride = ws
if pad is None:
pad = (0,) * nd
ws = tensor.as_tensor_variable(ws)
stride = tensor.as_tensor_variable(stride)
pad = tensor.as_tensor_variable(pad)
assert isinstance(x, Variable) and x.ndim >= nd
assert isinstance(gz, Variable) and gz.ndim >= nd
assert isinstance(ws, Variable) and ws.ndim == 1
assert isinstance(stride, Variable) and stride.ndim == 1
assert x.ndim == gz.ndim >= nd
assert isinstance(pad, Variable) and pad.ndim == 1
if ws.dtype not in tensor.int_dtypes:
raise TypeError('Pool downsample parameters must be ints.')
if stride.dtype not in tensor.int_dtypes:
raise TypeError('Stride parameters must be ints.')
if pad.dtype not in tensor.int_dtypes:
raise TypeError('Padding parameters must be ints.')
return Apply(self, [x, gz, ws, stride, pad], [x.type()])
def perform(self, node, inp, out):
x, gz, ws, stride, pad = inp
gx_stg, = out
nd = self.ndim
assert ws.shape == stride.shape == pad.shape == (nd,)
if len(x.shape) < nd:
raise NotImplementedError(
'AveragePoolGrad requires input with {} or more dimensions'.format(nd))
if self.mode == 'average_exc_pad' and max(pad) != 0:
raise NotImplementedError()
z_shape = self.out_shape(x.shape, ws, self.ignore_border, stride, pad, nd)
if (gx_stg[0] is None) or (gx_stg[0].shape != z_shape):
gx_stg[0] = np.empty(z_shape, dtype=x.dtype)
zz = gx_stg[0]
# size of pooling output
pool_out_shp = zz.shape[-nd:]
img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))
inc_pad = self.mode == 'average_inc_pad'
sum_mode = self.mode == 'sum'
# initialize the padded output
gx = np.zeros((x.shape[:-nd] + img_shp), dtype=x.dtype)
# precompute the region boundaries and sizes for each dimension
region_slices = [[] for i in xrange(nd)]
region_sizes = [[] for i in xrange(nd)]
for i in xrange(nd):
for j in xrange(pool_out_shp[i]):
if sum_mode or inc_pad:
start = j * stride[i]
else:
start = builtins.max(j * stride[i], pad[i])
end = builtins.min(start + ws[i], img_shp[i])
region_slices[i].append(slice(start, end))
region_sizes[i].append(end - start)
# iterate over non-pooling dimensions
region_slice = [None] * nd
for k in np.ndindex(*x.shape[:-nd]):
gzk = gz[k]
gxk = gx[k]
# iterate over pooling regions
for r in np.ndindex(*pool_out_shp):
region_size = 1
for i in xrange(nd):
region_slice[i] = region_slices[i][r[i]]
region_size *= region_sizes[i][r[i]]
if sum_mode:
val = gzk[r]
else:
# divide by region size
val = gzk[r] / region_size
gxk[region_slice] += val
# unpad the image
gx = gx[(slice(None),) * (len(x.shape) - nd) +
tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))]
gx_stg[0] = gx
def grad(self, inp, grads):
x, gz, ws, stride, pad = inp
ggx, = grads
return ([theano.tensor.zeros_like(x),
Pool(ignore_border=self.ignore_border,
ndim=self.ndim, mode=self.mode)(ggx,
ws, stride, pad)] + [DisconnectedType()() for i in inp[2:]])
def connection_pattern(self, node):
return [[1], [1], [0], [0], [0]]
def c_code(self, node, name, inp, out, sub):
x, gz, ws, stride, pad = inp
gx, = out
nd = self.ndim
total_ndim = node.inputs[0].ndim
non_pool_ndim = total_ndim - nd
fail = sub['fail']
inc_pad = int(self.mode == 'average_inc_pad')
sum_mode = int(self.mode == 'sum')
if self.openmp:
# run in parallel over each pooling block
omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_pad_width, r_idx, i_idx, o_idx) schedule(static)'
else:
omp_parallel = ''
ccode = """
// sanity checks
int x_typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);
int gz_typenum = PyArray_ObjectType((PyObject*)%(gz)s, 0);
if (x_typenum != gz_typenum)
{
PyErr_SetString(PyExc_ValueError, "input types must all match");
%(fail)s;
}
if(PyArray_NDIM(%(x)s)!=%(total_ndim)s)
{
PyErr_SetString(PyExc_ValueError, "x must be a %(total_ndim)sD ndarray");
%(fail)s;
}
if(PyArray_NDIM(%(gz)s)!=%(total_ndim)s)
{
PyErr_SetString(PyExc_ValueError, "gz must be a %(total_ndim)sD ndarray");
%(fail)s;
}
if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "ws must be a vector of size %(nd)s");
%(fail)s;
}
if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "stride must be a vector of size %(nd)s");
%(fail)s;
}
if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "pad must be a vector of size %(nd)s");
%(fail)s;
}
int z[%(nd)s]; // shape of the output
int r[%(nd)s]; // shape of the padded_input
int ws[%(nd)s];
int st[%(nd)s];
int pd[%(nd)s];
int nonzero_padding;
nonzero_padding = 0;
for (int i=0; i<%(nd)s; i++)
{
ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));
st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));
pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));
z[i] = PyArray_DIMS(%(gz)s)[%(non_pool_ndim)s + i];
r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];
if (pd[i]>0)
nonzero_padding = 1;
}
if (!%(inc_pad)s && !%(sum_mode)s && nonzero_padding)
{
PyErr_SetString(PyExc_ValueError,
"padding must be zero for average_exc_pad");
%(fail)s;
}
// allocating memory for output, if necessary
int mem_nec;
mem_nec = 0;
if ((!%(gx)s) || !PyArray_ISCONTIGUOUS(%(gx)s)
|| *PyArray_DIMS(%(gx)s)!=%(total_ndim)s)
{
mem_nec = 1;
}
if (!mem_nec)
{
for (int i=0; i<%(total_ndim)s; i++)
{
if (PyArray_DIMS(%(gx)s)[i] != PyArray_DIMS(%(x)s)[i])
{
mem_nec = 1;
break;
}
}
}
if (mem_nec)
{
Py_XDECREF(%(gx)s);
%(gx)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, PyArray_DIMS(%(x)s), x_typenum,0);
}
else {
PyArray_FILLWBYTE(%(gx)s, 0);
}
int z_prod;
// do not run if any z[i] is zero
z_prod = 1;
for (int i=0; i<%(nd)s; i++)
{
z_prod *= z[i];
}
if (z_prod)
{
// will be used to hold start and end index of a region
int r_st[%(nd)s];
int r_end[%(nd)s];
// padded region size
int r_pad_width[%(nd)s];
// index for iterating over the pooling regions
int r_idx[%(nd)s];
// placeholder for PyArray indexing (output)
npy_intp o_idx[%(total_ndim)s];
// placeholder for PyArray indexing (input)
npy_intp i_idx[%(total_ndim)s];
// loop over non-pooling dimensions
int non_pooling_prod = 1;
for (int i=0; i<%(non_pool_ndim)s; i++)
{
non_pooling_prod *= PyArray_DIMS(%(x)s)[i];
}
%(omp_parallel)s
// first loop over non-pooling dimensions
for (int t=0; t<non_pooling_prod; t++)
{
// compute the non-pooling index in each dimension
if (%(non_pool_ndim)s!=0)
{
o_idx[0] = t;
i_idx[0] = t;
for (int i=1; i<%(non_pool_ndim)s; i++)
{
o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];
o_idx[i - 1] = o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];
i_idx[i] = o_idx[i];
i_idx[i - 1] = o_idx[i - 1];
}
}
// then loop over each region in each pooling dimension
"""
for i in xrange(nd):
ccode += """
for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {
r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];
if (!%(sum_mode)s && !%(inc_pad)s && r_st[%(i)s] < pd[%(i)s])
{
r_st[%(i)s] = pd[%(i)s];
}
r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];
r_end[%(i)s] = r_end[%(i)s] > r[%(i)s] ? r[%(i)s] : r_end[%(i)s];
r_pad_width[%(i)s] = r_end[%(i)s] - r_st[%(i)s];
// from padded_img space to img space
r_st[%(i)s] = r_st[%(i)s] - pd[%(i)s] > 0 ? r_st[%(i)s] - pd[%(i)s] : 0;
r_end[%(i)s] = r_end[%(i)s] > r[%(i)s] - pd[%(i)s] ? r[%(i)s] - 2 * pd[%(i)s] : r_end[%(i)s] - pd[%(i)s];
// use the index to find the correct position in the output
o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];
""" % dict(i=i, sum_mode=sum_mode, inc_pad=inc_pad, non_pool_ndim=non_pool_ndim)
ccode += """
dtype_%(gz)s * gz;
dtype_%(gz)s val;
if (%(total_ndim)s == 4)
{
// the gradient for this region
gz = ((dtype_%(gz)s*)(PyArray_GETPTR4(%(gz)s, o_idx[0],o_idx[1],o_idx[2],o_idx[3])));
}
else
{
// the gradient for this region
gz = ((dtype_%(gz)s*)(PyArray_GetPtr(%(gz)s, o_idx)));
}
// compute the contribution
if (%(sum_mode)s)
{
val = gz[0];
}
else
{
val = gz[0] / (%(region_size)s);
}
"""
region_size = ' * '.join('r_pad_width[%d]' % i for i in xrange(nd))
for i in xrange(nd):
ccode += """
// go through the pooled region in the unpadded input
for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
{
i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
""" % dict(i=i, non_pool_ndim=non_pool_ndim)
ccode += """
dtype_%(gx)s * gx;
if (%(total_ndim)s == 4)
{
gx = ((dtype_%(gx)s*)(PyArray_GETPTR4(%(gx)s, i_idx[0],i_idx[1],i_idx[2],i_idx[3])));
}
else
{
gx = ((dtype_%(gx)s*)(PyArray_GetPtr(%(gx)s, i_idx)));
}
gx[0] = gx[0] + val;
"""
for i in xrange(nd):
ccode += """
} // for loop over region
"""
for i in xrange(nd):
ccode += """
} // loop over pooling dimension
"""
ccode += """
} // for loop over non-pooling dimensions
} // if z_prod
"""
return ccode % locals()
def c_code_cache_version(self):
return (0, 3, self.openmp)
class DownsampleFactorMaxGradGrad(OpenMPOp):
__props__ = ('ignore_border', 'mode', 'ndim')
def __init__(self, ignore_border, mode='max', ndim=2, openmp=None):
self.ndim = ndim
self.ignore_border = ignore_border
self.mode = mode
super(DownsampleFactorMaxGradGrad, self).__init__(openmp=openmp)
assert self.mode == 'max'
def make_node(self, x, maxout, gz, ws, stride=None, pad=None):
# make_node should only be called by the grad function of
# MaxPoolGrad, so these asserts should not fail.
x = tensor.as_tensor_variable(x)
maxout = tensor.as_tensor_variable(maxout)
gz = tensor.as_tensor_variable(gz)
nd = self.ndim
if stride is None:
stride = ws
if pad is None:
pad = (0,) * nd
elif isinstance(pad, (tuple, list)):
if max(pad) != 0 and not self.ignore_border:
raise NotImplementedError(
'padding works only with ignore_border=True')
if isinstance(ws, (tuple, list)):
if any(pad[i] >= ws[i] for i in range(nd)):
raise NotImplementedError(
'padding must be smaller than the pooling window size')
ws = tensor.as_tensor_variable(ws)
stride = tensor.as_tensor_variable(stride)
pad = tensor.as_tensor_variable(pad)
assert ws.ndim == 1
assert stride.ndim == 1
assert pad.ndim == 1
assert x.ndim == maxout.ndim == gz.ndim >= nd
if ws.dtype not in tensor.int_dtypes:
raise TypeError('Pool downsample parameters must be ints.')
if stride.dtype not in tensor.int_dtypes:
raise TypeError('Stride parameters must be ints.')
if pad.dtype not in tensor.int_dtypes:
raise TypeError('Padding parameters must be ints.')
return Apply(self, [x, maxout, gz, ws, stride, pad], [x.type()])
def perform(self, node, inp, out):
x, maxout, ggx, ws, stride, pad = inp
z, = out
nd = self.ndim
assert ws.shape == stride.shape == pad.shape == (nd,)
if len(x.shape) < nd:
raise NotImplementedError(
'DownsampleFactorMaxGradGrad requires input '
'with {} or more dimensions'.format(nd))
if (z[0] is None) or (z[0].shape != maxout.shape):
z[0] = np.zeros(maxout.shape, dtype=x.dtype)
ggz = z[0] # grad wrt maxout_grad has the same shape as maxout
# size of pooling output
pool_out_shp = ggz.shape[-nd:]
img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))
# pad the image and its gradients
if max(pad) > 0:
y_padded = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
y_padded[(slice(None),) * (len(x.shape) - nd) +
tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x
ggx_padded = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
ggx_padded[(slice(None),) * (len(x.shape) - nd) +
tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = ggx
else:
y_padded = x
ggx_padded = ggx
# precompute the region boundaries for each dimension
region_ranges = [[] for i in xrange(nd)]
for i in xrange(nd):
for j in xrange(pool_out_shp[i]):
start = j * stride[i]
end = builtins.min(start + ws[i], img_shp[i])
region_ranges[i].append(xrange(start, end))
# iterate over non-pooling dimensions
for k in np.ndindex(*x.shape[:-nd]):
ggxk = ggx_padded[k]
ggzk = ggz[k]
yk = y_padded[k]
maxoutk = maxout[k]
# iterate over pooling regions
for r in np.ndindex(*pool_out_shp):
# iterate inside region
maxout_value = maxoutk[r]
for c in itertools.product(*[region_ranges[i][r[i]]
for i in xrange(nd)]):
if maxout_value == yk[c]:
ggzk[r] += ggxk[c]
def infer_shape(self, node, in_shapes):
return [in_shapes[1]]
def grad(self, inp, grads):
x, maxout, ggx, ws, stride, pad = inp
gz, = grads
return [theano.tensor.zeros_like(x),
theano.tensor.zeros_like(maxout),
MaxPoolGrad(ignore_border=self.ignore_border,
ndim=self.ndim)(x, maxout, gz,
ws, stride, pad),
DisconnectedType()(),
DisconnectedType()(),
DisconnectedType()()]
def connection_pattern(self, node):
return [[1], [1], [1], [0], [0], [0]]
def c_code(self, node, name, inp, out, sub):
if self.mode != 'max':
raise theano.gof.utils.MethodNotDefined()
x, maxout, ggx, ws, stride, pad = inp
z, = out # the grad of grad
nd = self.ndim
total_ndim = node.inputs[0].ndim
non_pool_ndim = total_ndim - nd
fail = sub['fail']
if self.openmp:
# run in parallel over each pooling block
omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_idx, i_idx, o_idx, maximum) schedule(static)'
else:
omp_parallel = ''
ccode = """
int z_typenum = PyArray_ObjectType((PyObject*)%(maxout)s, 0);
int z[%(nd)s]; // shape of the output
int r[%(nd)s]; // shape of the padded_input
int ws[%(nd)s];
int st[%(nd)s];
int pd[%(nd)s];
if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "ws must be a vector of size %(nd)s");
%(fail)s;
}
if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "stride must be a vector of size %(nd)s");
%(fail)s;
}
if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "pad must be a vector of size %(nd)s");
%(fail)s;
}
for (int i=0; i<%(nd)s; i++)
{
ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));
st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));
pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));
z[i] = PyArray_DIMS(%(maxout)s)[%(non_pool_ndim)s + i];
r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];
}
// allocating memory for output, if necessary
int mem_nec;
mem_nec = 0;
if ((!%(z)s) || !PyArray_ISCONTIGUOUS(%(z)s)
|| *PyArray_DIMS(%(z)s)!=%(total_ndim)s)
{
mem_nec = 1;
}
if (!mem_nec)
{
for (int i=0; i<%(total_ndim)s; i++)
{
if (PyArray_DIMS(%(z)s)[i] != PyArray_DIMS(%(maxout)s)[i])
{
mem_nec = 1;
break;
}
}
}
if (mem_nec)
{
Py_XDECREF(%(z)s);
%(z)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, PyArray_DIMS(%(maxout)s), z_typenum,0);
}
else {
PyArray_FILLWBYTE(%(z)s, 0);
}
dtype_%(maxout)s maximum; // temp var for maximum value in a region
// will be used to hold start and end index of a region
int r_st[%(nd)s];
int r_end[%(nd)s];
// index for iterating over the pooling regions
int r_idx[%(nd)s];
// placeholder for PyArray indexing (output)
npy_intp o_idx[%(total_ndim)s];
// placeholder for PyArray indexing (input)
npy_intp i_idx[%(total_ndim)s];
// loop over non-pooling dimensions
int non_pooling_prod;
non_pooling_prod = 1;
for (int i=0; i<%(non_pool_ndim)s; i++)
{
non_pooling_prod *= PyArray_DIMS(%(x)s)[i];
}
%(omp_parallel)s
// first loop over non-pooling dimensions
for (int t=0; t<non_pooling_prod; t++)
{
// compute the non-pooling index in each dimension
if (%(non_pool_ndim)s!=0)
{
o_idx[0] = t;
i_idx[0] = t;
for (int i=1; i<%(non_pool_ndim)s; i++)
{
o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];
o_idx[i - 1] = o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];
i_idx[i] = o_idx[i];
i_idx[i - 1] = o_idx[i - 1];
}
}
// then loop over each region in each pooling dimension
"""
for i in xrange(nd):
ccode += """
for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {
r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];
r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];
// skip the padding
r_st[%(i)s] = r_st[%(i)s] < pd[%(i)s] ? pd[%(i)s] : r_st[%(i)s];
r_end[%(i)s] = r_end[%(i)s] > (r[%(i)s] - pd[%(i)s]) ? r[%(i)s] - pd[%(i)s] : r_end[%(i)s];
// from padded_img space to img space
r_st[%(i)s] -= pd[%(i)s];
r_end[%(i)s] -= pd[%(i)s];
// use the index to find the correct position in the output
o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];
""" % dict(i=i, non_pool_ndim=non_pool_ndim)
ccode += """
dtype_%(z)s * z;
if (%(total_ndim)s == 4)
{
// the maximum value
maximum = ((dtype_%(maxout)s*)(PyArray_GETPTR4(%(maxout)s,o_idx[0],o_idx[1],o_idx[2],o_idx[3])))[0];
// z at this position
z = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s,o_idx[0],o_idx[1],o_idx[2],o_idx[3])));
}
else
{
// the maximum value
maximum = ((dtype_%(maxout)s*)(PyArray_GetPtr(%(maxout)s,o_idx)))[0];
// z at this position
z = ((dtype_%(z)s*)(PyArray_GetPtr(%(z)s,o_idx)));
}
"""
for i in xrange(nd):
ccode += """
// go through the pooled region in the unpadded input
for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
{
i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
""" % dict(i=i, non_pool_ndim=non_pool_ndim)
ccode += """
dtype_%(x)s a;
dtype_%(ggx)s * ggx;
if (%(total_ndim)s == 4)
{
a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
ggx = ((dtype_%(ggx)s*)(PyArray_GETPTR4(%(ggx)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])));
}
else
{
a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
ggx = ((dtype_%(ggx)s*)(PyArray_GetPtr(%(ggx)s,i_idx)));
}
if (a == maximum){
z[0] += ggx[0];
}
"""
for i in xrange(nd):
ccode += """
} // for loop over region
"""
for i in xrange(nd):
ccode += """
} // loop over pooling dimension
"""
ccode += """
} // for loop over non-pooling dimensions
"""
return ccode % locals()
def c_code_cache_version(self):
return (0, 4, self.openmp)
class MaxPoolRop(OpenMPOp):
"""
Implements the R-operator for the downsample operation.
Parameters
----------
ws : list or tuple of N ints
Downsample factor over rows, columns etc.
ws indicates the size of the pooling region.
ignore_border : bool
If ws doesn't divide imgshape, do we include an extra row/col/slice
of partial downsampling (False) or ignore it (True).
stride : list or tuple of N ints or None
Stride size, which is the number of shifts over rows/cols/slices to get the
next pool region. If stride is None, it is considered equal to ws
(no overlap on pooling regions).
pad : tuple of N ints or None
For each downsampling dimension, this specifies the number of zeros to
add as padding on both sides. In 2D, with pad given as (pad_h, pad_w), pad_h
specifies the size of the top and bottom margins, and pad_w specifies the size
of the left and right margins. No padding is added if pad is None.
mode : {'max', 'sum', 'average_inc_pad', 'average_exc_pad'}
('average_inc_pad' includes the padding in the count,
'average_exc_pad' excludes it)
ndim : int
The number of pooling dimensions N.
The default is 2.
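Notes
-----
For each pooling window, the output takes the value of the evaluation
point at the position where the input attains its maximum. A minimal
NumPy sketch of that per-window rule (an illustrative helper, not part
of the Op's API; it mirrors the Python ``perform`` implementation)::

    import numpy as np

    def maxpool_rop_window(x_window, eval_window):
        # illustrative helper: position of the maximum of the input window
        idx = np.unravel_index(np.argmax(x_window), x_window.shape)
        # the R-op output for this window is the eval point at that position
        return eval_window[idx]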
"""
__props__ = ('ignore_border', 'mode', 'ndim')
params_type = ParamsType(ignore_border=bool_t,)
def __init__(self, ignore_border=False, mode='max', ndim=2, openmp=None):
super(MaxPoolRop, self).__init__(openmp=openmp)
self.ndim = ndim
self.ignore_border = ignore_border
self.mode = mode
assert mode == 'max'
def make_node(self, x, eval_point, ws, stride=None, pad=None):
# TODO: consider restricting the dtype?
x = tensor.as_tensor_variable(x)
eval_point = tensor.as_tensor_variable(eval_point)
nd = self.ndim
if stride is None:
stride = ws
if pad is None:
pad = (0,) * nd
elif isinstance(pad, (tuple, list)):
if max(pad) != 0 and not self.ignore_border:
raise NotImplementedError(
'padding works only with ignore_border=True')
if isinstance(ws, (tuple, list)):
if any(pad[i] >= ws[i] for i in range(nd)):
raise NotImplementedError(
'padding must be smaller than the pooling window size')
ws = tensor.as_tensor_variable(ws)
stride = tensor.as_tensor_variable(stride)
pad = tensor.as_tensor_variable(pad)
assert ws.ndim == 1
assert stride.ndim == 1
assert pad.ndim == 1
if x.type.ndim < nd:
raise TypeError()
if not ws.dtype.startswith('int'):
raise TypeError('Pool downsample parameters must be ints.')
if not stride.dtype.startswith('int'):
raise TypeError('Stride parameters must be ints.')
if not pad.dtype.startswith('int'):
raise TypeError('Padding parameters must be ints.')
# If the input shapes are broadcastable we can have 0 in the output shape
broad = x.broadcastable[:-nd] + (False,) * nd
out = tensor.TensorType(eval_point.dtype, broad)
return gof.Apply(self, [x, eval_point, ws, stride, pad], [out()])
def perform(self, node, inp, out, params):
x, ex, ws, stride, pad = inp
z, = out
nd = self.ndim
assert ws.shape == stride.shape == pad.shape == (nd,)
if len(x.shape) < nd:
raise NotImplementedError(
'Pool requires input with {} or more dimensions'.format(nd))
z_shape = Pool.out_shape(x.shape, ws, params.ignore_border, stride, pad, nd)
if not self.ignore_border:
assert all(z > 0 for z in z_shape[-nd:])
if (z[0] is None) or (z[0].shape != z_shape):
z[0] = np.empty(z_shape, dtype=x.dtype)
zz = z[0]
# size of pooling output
pool_out_shp = zz.shape[-nd:]
img_shp = tuple(x.shape[-nd + i] + 2 * pad[i] for i in xrange(nd))
inc_pad = self.mode == 'average_inc_pad'
# pad the image and the eval point
if max(pad) != 0:
y = np.zeros(x.shape[:-nd] + img_shp, dtype=x.dtype)
y[(slice(None),) * (len(x.shape) - nd) +
tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = x
ey = np.zeros(ex.shape[:-nd] + img_shp, dtype=ex.dtype)
ey[(slice(None),) * (len(ex.shape) - nd) +
tuple(slice(pad[i], img_shp[i] - pad[i]) for i in xrange(nd))] = ex
else:
y = x
ey = ex
# precompute the region boundaries for each dimension
region_slices = [[] for i in xrange(nd)]
for i in xrange(nd):
for j in xrange(pool_out_shp[i]):
start = j * stride[i]
end = builtins.min(start + ws[i], img_shp[i])
if not inc_pad:
start = builtins.max(start, pad[i])
end = builtins.min(end, img_shp[i] - pad[i])
region_slices[i].append(slice(start, end))
# iterate over non-pooling dimensions
for k in np.ndindex(*x.shape[:-nd]):
zzk = zz[k]
yk = y[k]
eyk = ey[k]
# iterate over pooling regions
for r in np.ndindex(*pool_out_shp):
# current slice in padded input
ykslice = yk[[region_slices[i][r[i]] for i in xrange(nd)]]
# current slice in eval points
eykslice = eyk[[region_slices[i][r[i]] for i in xrange(nd)]]
# indices of maximum
idx = np.unravel_index(np.argmax(ykslice), ykslice.shape)
zzk[r] = eykslice[idx]
def c_headers(self):
headers = ['<algorithm>']
headers += super(MaxPoolRop, self).c_headers()
return headers
def c_code(self, node, name, inp, out, sub):
if self.mode != 'max':
raise theano.gof.utils.MethodNotDefined()
x, ex, ws, stride, pad = inp
z, = out
nd = self.ndim
total_ndim = node.inputs[0].ndim
non_pool_ndim = total_ndim - nd
fail = sub['fail']
params = sub['params']
if self.openmp:
# run in parallel over each pooling block
omp_parallel = '#pragma omp parallel for private(r_st, r_end, r_idx, i_idx, o_idx, collector, eval_collector) schedule(static)'
else:
omp_parallel = ''
ccode = """
int typenum = PyArray_ObjectType((PyObject*)%(x)s, 0);
if(PyArray_NDIM(%(x)s)!=%(total_ndim)s)
{
PyErr_SetString(PyExc_ValueError, "x must be a %(total_ndim)sD ndarray");
%(fail)s;
}
if(PyArray_NDIM(%(ex)s)!=%(total_ndim)s)
{
PyErr_SetString(PyExc_ValueError, "eval_point must be a %(total_ndim)sD ndarray");
%(fail)s;
}
if(PyArray_DIM(%(ws)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "ws must be a vector of size %(nd)s");
%(fail)s;
}
if(PyArray_DIM(%(stride)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "stride must be a vector of size %(nd)s");
%(fail)s;
}
if(PyArray_DIM(%(pad)s, 0)!=%(nd)s)
{
PyErr_SetString(PyExc_ValueError, "pad must be a vector of size %(nd)s");
%(fail)s;
}
int z[%(nd)s]; // shape of the output
int r[%(nd)s]; // shape of the padded_input
int ws[%(nd)s];
int st[%(nd)s];
int pd[%(nd)s];
int nonzero_padding;
nonzero_padding = 0;
for (int i=0; i<%(nd)s; i++)
{
ws[i] = *((npy_intp*)PyArray_GETPTR1(%(ws)s, i));
st[i] = *((npy_intp*)PyArray_GETPTR1(%(stride)s, i));
pd[i] = *((npy_intp*)PyArray_GETPTR1(%(pad)s, i));
r[i] = PyArray_DIMS(%(x)s)[%(non_pool_ndim)s + i] + 2 * pd[i];
if (pd[i]>0)
nonzero_padding = 1;
}
if (!%(params)s->ignore_border && nonzero_padding)
{
PyErr_SetString(PyExc_ValueError,
"padding must be zero when ignore border is False");
%(fail)s;
}
if (%(params)s->ignore_border)
{
for (int i=0; i<%(nd)s; i++)
{
// '/' in C is different from '/' in python
if (r[i] - ws[i] < 0)
{
z[i] = 0;
}
else
{
z[i] = (r[i] - ws[i]) / st[i] + 1;
}
}
}
else
{
for (int i=0; i<%(nd)s; i++)
{
// decide how many rows/cols the output has
if (st[i] >= ws[i])
{
z[i] = (r[i] - 1) / st[i] + 1;
}
else
{
z[i] = std::max(0, (r[i] - 1 - ws[i] + st[i]) / st[i]) + 1;
}
assert(z[i] > 0);
}
}
// memory allocation of z if necessary
int mem_nec;
mem_nec = 0;
if ((!%(z)s) || *PyArray_DIMS(%(z)s)!=%(total_ndim)s)
{
mem_nec = 1;
}
if (!mem_nec)
{
for (int i=0; i<%(non_pool_ndim)s; i++)
{
if (PyArray_DIMS(%(z)s)[i] != PyArray_DIMS(%(x)s)[i])
{
mem_nec = 1;
break;
}
}
}
if (!mem_nec)
{
for (int i=0; i<%(nd)s; i++)
{
if (PyArray_DIMS(%(z)s)[%(non_pool_ndim)s + i] != z[i])
{
mem_nec = 1;
break;
}
}
}
if (mem_nec)
{
if (%(z)s) Py_XDECREF(%(z)s);
npy_intp dims[%(total_ndim)s];
for (int i=0; i<%(non_pool_ndim)s; i++)
{
dims[i] = PyArray_DIMS(%(x)s)[i];
}
for (int i=0; i<%(nd)s; i++)
{
dims[%(non_pool_ndim)s + i] = z[i];
}
//TODO: zeros not necessary
%(z)s = (PyArrayObject*) PyArray_ZEROS(%(total_ndim)s, dims, typenum,0);
}
// initialize temp var for the value in a region
dtype_%(x)s collector;
dtype_%(ex)s eval_collector;
int z_prod;
// do not run if any z[i] is zero
z_prod = 1;
for (int i=0; i<%(nd)s; i++)
{
z_prod *= z[i];
}
if (z_prod)
{
// will be used to hold start and end index of a region
int r_st[%(nd)s];
int r_end[%(nd)s];
// index for iterating over the pooling regions
int r_idx[%(nd)s];
// placeholder for PyArray indexing (output)
npy_intp o_idx[%(total_ndim)s];
// placeholder for PyArray indexing (input)
npy_intp i_idx[%(total_ndim)s];
// loop over non-pooling dimensions
int non_pooling_prod = 1;
for (int i=0; i<%(non_pool_ndim)s; i++)
{
non_pooling_prod *= PyArray_DIMS(%(x)s)[i];
}
%(omp_parallel)s
// first loop over non-pooling dimensions
for (int t=0; t<non_pooling_prod; t++)
{
// compute the non-pooling index in each dimension
if (%(non_pool_ndim)s!=0)
{
o_idx[0] = t;
i_idx[0] = t;
for (int i=1; i<%(non_pool_ndim)s; i++)
{
o_idx[i] = o_idx[i - 1] / PyArray_DIMS(%(x)s)[i - 1];
o_idx[i - 1] = o_idx[i - 1] %% PyArray_DIMS(%(x)s)[i - 1];
i_idx[i] = o_idx[i];
i_idx[i - 1] = o_idx[i - 1];
}
}
// then loop over each region in each pooling dimension
"""
for i in xrange(nd):
ccode += """
for (r_idx[%(i)s]=0; r_idx[%(i)s] < z[%(i)s]; r_idx[%(i)s]++) {
r_st[%(i)s] = r_idx[%(i)s] * st[%(i)s];
r_end[%(i)s] = r_st[%(i)s] + ws[%(i)s];
// skip the padding
r_st[%(i)s] = r_st[%(i)s] < pd[%(i)s] ? pd[%(i)s] : r_st[%(i)s];
r_end[%(i)s] = r_end[%(i)s] > (r[%(i)s] - pd[%(i)s]) ? r[%(i)s] - pd[%(i)s] : r_end[%(i)s];
// from padded_img space to img space
r_st[%(i)s] -= pd[%(i)s];
r_end[%(i)s] -= pd[%(i)s];
// handle the case of no padding when ignore_border is True
if (%(params)s->ignore_border)
{
r_end[%(i)s] = r_end[%(i)s] > r[%(i)s] ? r[%(i)s] : r_end[%(i)s];
}
// use the index to find the correct position in the output
o_idx[%(non_pool_ndim)s + %(i)s] = r_idx[%(i)s];
""" % dict(i=i, params=sub['params'], non_pool_ndim=non_pool_ndim)
ccode += """
// get a pointer to the correct position in the output
dtype_%(z)s * z;
if (%(total_ndim)s == 4)
z = ((dtype_%(z)s*)(PyArray_GETPTR4(%(z)s, o_idx[0], o_idx[1], o_idx[2], o_idx[3])));
else
z = ((dtype_%(z)s*)(PyArray_GetPtr(%(z)s, o_idx)));
"""
for i in xrange(nd):
ccode += """
// set the first index of dimension %(i)s
i_idx[%(non_pool_ndim)s + %(i)s] = r_st[%(i)s];
""" % dict(i=i, non_pool_ndim=non_pool_ndim)
ccode += """
// use the first element as the initial value of collector
if (%(total_ndim)s == 4) {
collector = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
eval_collector = ((dtype_%(ex)s*)(PyArray_GETPTR4(%(ex)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
} else {
collector = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
eval_collector = ((dtype_%(ex)s*)(PyArray_GetPtr(%(ex)s,i_idx)))[0];
}
"""
for i in xrange(nd):
ccode += """
// go through the pooled region in the unpadded input
for(int m%(i)s=r_st[%(i)s]; m%(i)s<r_end[%(i)s]; m%(i)s++)
{
i_idx[%(non_pool_ndim)s + %(i)s] = m%(i)s;
""" % dict(i=i, non_pool_ndim=non_pool_ndim)
ccode += """
// update maximum
dtype_%(x)s a;
dtype_%(ex)s ea;
if (%(total_ndim)s == 4) {
a = ((dtype_%(x)s*)(PyArray_GETPTR4(%(x)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
ea = ((dtype_%(ex)s*)(PyArray_GETPTR4(%(ex)s,i_idx[0],i_idx[1],i_idx[2],i_idx[3])))[0];
}
else {
a = ((dtype_%(x)s*)(PyArray_GetPtr(%(x)s,i_idx)))[0];
ea = ((dtype_%(ex)s*)(PyArray_GetPtr(%(ex)s,i_idx)))[0];
}
if (a > collector) {
collector = a;
eval_collector = ea;
}
"""
for i in xrange(nd):
ccode += """
} // for loop over region
"""
ccode += """
z[0] = eval_collector;
"""
for i in xrange(nd):
ccode += """
} // loop over pooling dimension
"""
ccode += """
} // for loop over non-pooling dimensions
} // if z_prod
"""
return ccode % locals()
def c_code_cache_version(self):
return (1, self.openmp)
|
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='admin@test.com',
password='password123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='test@test.com',
password='password123',
name='John Smith'
)
def test_users_listed(self):
"""Test that users are listed on user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that the user edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import OrderNotFound
class tidebit (Exchange):
def describe(self):
return self.deep_extend(super(tidebit, self).describe(), {
'id': 'tidebit',
'name': 'TideBit',
'countries': ['HK'],
'rateLimit': 1000,
'version': 'v2',
'has': {
'fetchDepositAddress': True,
'CORS': True,
'fetchTickers': True,
'fetchOHLCV': True,
'withdraw': True,
},
'timeframes': {
'1m': '1',
'5m': '5',
'15m': '15',
'30m': '30',
'1h': '60',
'2h': '120',
'4h': '240',
'12h': '720',
'1d': '1440',
'3d': '4320',
'1w': '10080',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/39034921-e3acf016-4480-11e8-9945-a6086a1082fe.jpg',
'api': 'https://www.tidebit.com',
'www': 'https://www.tidebit.com',
'doc': [
'https://www.tidebit.com/documents/api/guide',
'https://www.tidebit.com/swagger/#/default',
],
'referral': 'http://bit.ly/2IX0LrM',
},
'api': {
'public': {
'get': [
'markets',
'tickers',
'tickers/{market}',
'timestamp',
'trades',
'trades/{market}',
'order_book',
'order',
'k_with_pending_trades',
'k',
'depth',
],
'post': [],
},
'private': {
'get': [
'addresses/{address}',
'deposits/history',
'deposits/get_deposit',
'deposits/deposit_address',
'historys/orders',
'historys/vouchers',
'historys/accounts',
'historys/snapshots',
'linkage/get_status',
'members/me',
'order',
'orders',
'partners/orders/{id}/trades',
'referral_commissions/get_undeposited',
'referral_commissions/get_graph_data',
'trades/my',
'withdraws/bind_account_list',
'withdraws/get_withdraw_account',
'withdraws/fetch_bind_info',
],
'post': [
'deposits/deposit_cash',
'favorite_markets/update',
'order/delete',
'orders',
'orders/multi',
'orders/clear',
'referral_commissions/deposit',
'withdraws/apply',
'withdraws/bind_bank',
'withdraws/bind_address',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.2 / 100,
'taker': 0.2 / 100,
},
'funding': {
'tierBased': False,
'percentage': True,
'withdraw': {}, # There is only 1% fee on withdrawals to your bank account.
},
},
'exceptions': {
'2002': InsufficientFunds,
'2003': OrderNotFound,
},
})
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.privateGetDepositAddress(self.extend(request, params))
if 'success' in response:
if response['success']:
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'addressTag')
return {
'currency': code,
'address': self.check_address(address),
'tag': tag,
'info': response,
}
def fetch_markets(self, params={}):
response = self.publicGetMarkets(params)
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'id')
symbol = self.safe_string(market, 'name')
baseId, quoteId = symbol.split('/')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetMembersMe(params)
balances = self.safe_value(response, 'accounts')
result = {'info': balances}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float(balance, 'balance')
account['used'] = self.safe_float(balance, 'locked')
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
if limit is not None:
request['limit'] = limit # default = 300
request['market'] = market['id']
response = self.publicGetDepth(self.extend(request, params))
timestamp = self.safe_timestamp(response, 'timestamp')
return self.parse_order_book(response, timestamp)
def parse_ticker(self, ticker, market=None):
timestamp = self.safe_timestamp(ticker, 'at')
ticker = self.safe_value(ticker, 'ticker', {})
symbol = None
if market is not None:
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'buy'),
'ask': self.safe_float(ticker, 'sell'),
'bidVolume': None,
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'change': None,
'percentage': None,
'previousClose': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'vol'),
'quoteVolume': None,
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
tickers = self.publicGetTickers(params)
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = None
symbol = id
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
else:
baseId = id[0:3]
quoteId = id[3:6]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = self.publicGetTickersMarket(self.extend(request, params))
return self.parse_ticker(response, market)
def parse_trade(self, trade, market=None):
timestamp = self.parse8601(self.safe_string(trade, 'created_at'))
id = self.safe_string(trade, 'id')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'volume')
cost = self.safe_float(trade, 'funds')
symbol = None
if market is not None:
symbol = market['symbol']
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': None,
'order': None,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = self.publicGetTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
ohlcv[0] * 1000,
ohlcv[1],
ohlcv[2],
ohlcv[3],
ohlcv[4],
ohlcv[5],
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
if limit is None:
limit = 30 # default is 30
request = {
'market': market['id'],
'period': self.timeframes[timeframe],
'limit': limit,
}
if since is not None:
request['timestamp'] = int(since / 1000)
else:
request['timestamp'] = 1800000
response = self.publicGetK(self.extend(request, params))
if response == 'null':
return []
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_order_status(self, status):
statuses = {
'done': 'closed',
'wait': 'open',
'cancel': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
symbol = None
if market is not None:
symbol = market['symbol']
else:
marketId = order['market']
symbol = self.markets_by_id[marketId]['symbol']
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
status = self.parse_order_status(self.safe_string(order, 'state'))
id = self.safe_string(order, 'id')
type = self.safe_string(order, 'ord_type')
side = self.safe_string(order, 'side')
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'volume')
filled = self.safe_float(order, 'executed_volume')
remaining = self.safe_float(order, 'remaining_volume')
cost = None
if price is not None:
if filled is not None:
cost = price * filled
return {
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'filled': filled,
'remaining': remaining,
'cost': cost,
'trades': None,
'fee': None,
'info': order,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
request = {
'market': self.market_id(symbol),
'side': side,
'volume': str(amount),
'ord_type': type,
}
if type == 'limit':
request['price'] = str(price)
response = self.privatePostOrders(self.extend(request, params))
market = self.markets_by_id[response['market']]
return self.parse_order(response, market)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'id': id,
}
result = self.privatePostOrderDelete(self.extend(request, params))
order = self.parse_order(result)
status = self.safe_string(order, 'status')
if status == 'closed' or status == 'canceled':
raise OrderNotFound(self.id + ' ' + self.json(order))
return order
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
id = self.safe_string(params, 'id')
if id is None:
            raise ExchangeError(self.id + ' withdraw() requires an extra `id` param (withdraw account id according to the withdraws/bind_account_list endpoint)')
request = {
'id': id,
'currency_type': 'coin', # or 'cash'
'currency': currency['id'],
'body': amount,
# 'address': address, # they don't allow withdrawing to direct addresses?
}
if tag is not None:
request['memo'] = tag
result = self.privatePostWithdrawsApply(self.extend(request, params))
return {
'info': result,
'id': None,
}
def nonce(self):
return self.milliseconds()
def encode_params(self, params):
return self.urlencode(self.keysort(params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = '/' + 'api/' + self.version + '/' + self.implode_params(path, params) + '.json'
query = self.omit(params, self.extract_params(path))
url = self.urls['api'] + request
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = str(self.nonce())
sortedByKey = self.keysort(self.extend({
'access_key': self.apiKey,
'tonce': nonce,
}, params))
query = self.urlencode(sortedByKey)
payload = method + '|' + request + '|' + query
signature = self.hmac(self.encode(payload), self.encode(self.secret))
suffix = query + '&signature=' + signature
if method == 'GET':
url += '?' + suffix
else:
body = suffix
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
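        # Illustrative sketch of the signing scheme above, with made-up values
        # (not taken from the exchange docs): for a private GET request to
        # '/api/v2/orders.json' with access_key 'AK' and tonce '1500000000000',
        # the string to sign would be
        #     'GET|/api/v2/orders.json|access_key=AK&tonce=1500000000000'
        # and the HMAC of that payload computed with the API secret is appended
        # to the query string as '&signature=...'.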
def handle_errors(self, code, reason, url, method, headers, body, response):
if code == 400:
error = self.safe_value(response, 'error')
errorCode = self.safe_string(error, 'code')
feedback = self.id + ' ' + self.json(response)
exceptions = self.exceptions
if errorCode in exceptions:
raise exceptions[errorCode](feedback)
# fallback to default error handler
|
"""
Copyright 2016, Andrew Lin
All rights reserved.
This software is licensed under the BSD 3-Clause License.
See LICENSE.txt at the root of the project or
https://opensource.org/licenses/BSD-3-Clause
"""
import pytest
from app.gameshow import make_gameshow
@pytest.fixture
def app():
"""The whole gameshow app."""
a = make_gameshow()
return a.test_client()
def test_scoreboard(app):
"""Test / endpoint."""
response = app.get('/')
assert response.status_code == 200
assert response.content_type.startswith('text/html')
def test_proctor(app):
"""Test /proctor endpoint."""
response = app.get('/proctor')
assert response.status_code == 200
assert response.content_type.startswith('text/html')
def test_players(app):
"""Test /players endpoint."""
|
from importlib import import_module
try:
from django.core.urlresolvers import reverse
except ImportError:  # Django >= 2.0, where django.core.urlresolvers was removed
from django.urls import reverse
from django.template.loader import render_to_string
from jet.dashboard import modules
from jet.dashboard.models import UserDashboardModule
from django.utils.translation import ugettext_lazy as _
from jet.ordered_set import OrderedSet
from jet.utils import get_admin_site_name, context_to_dict
try:
from django.template.context_processors import csrf
except ImportError:
from django.core.context_processors import csrf
class Dashboard(object):
"""
Base dashboard class. All custom dashboards should inherit it.
"""
#: Number of columns in which widgets can be placed
columns = 2
    #: Dashboard Modules (widgets) that the dashboard is filled with when the user opens it for the first time
#:
#: List of dashboard module **instances**
children = None
    #: Dashboard Modules (widgets) that the user can add to the dashboard at any time
    # (not created when the user opens the dashboard for the first time)
#:
#: List of dashboard module **classes**
available_children = None
app_label = None
context = None
modules = None
class Media:
css = ()
js = ()
def __init__(self, context, **kwargs):
for key in kwargs:
if hasattr(self.__class__, key):
setattr(self, key, kwargs[key])
self.children = self.children or []
self.available_children = self.available_children or []
self.set_context(context)
def set_context(self, context):
self.context = context
self.init_with_context(context)
self.load_modules()
def init_with_context(self, context):
"""
Override this method to fill your custom **Dashboard** class with widgets.
You should add your widgets to ``children`` and ``available_children`` attributes.
Usage example:
.. code-block:: python
from django.utils.translation import ugettext_lazy as _
from jet.dashboard import modules
from jet.dashboard.dashboard import Dashboard, AppIndexDashboard
class CustomIndexDashboard(Dashboard):
columns = 3
def init_with_context(self, context):
self.available_children.append(modules.LinkList)
self.children.append(modules.LinkList(
_('Support'),
children=[
{
'title': _('Django documentation'),
'url': 'http://docs.djangoproject.com/',
'external': True,
},
{
'title': _('Django "django-users" mailing list'),
'url': 'http://groups.google.com/group/django-users',
'external': True,
},
{
'title': _('Django irc channel'),
'url': 'irc://irc.freenode.net/django',
'external': True,
},
],
column=0,
order=0
))
"""
pass
def load_module(self, module_fullname):
package, module_name = module_fullname.rsplit('.', 1)
package = import_module(package)
module = getattr(package, module_name)
return module
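    # A minimal sketch of what load_module resolves (module path is illustrative):
    #     load_module('jet.dashboard.modules.LinkList')
    # imports the 'jet.dashboard.modules' package and returns its 'LinkList'
    # attribute, i.e. the dashboard module class.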
def create_initial_module_models(self, user):
module_models = []
i = 0
for module in self.children:
column = module.column if module.column is not None else i % self.columns
order = module.order if module.order is not None else int(i / self.columns)
module_models.append(UserDashboardModule.objects.create(
title=module.title,
app_label=self.app_label,
user=user.pk,
module=module.fullname(),
column=column,
order=order,
settings=module.dump_settings(),
children=module.dump_children()
))
i += 1
return module_models
def load_modules(self):
module_models = UserDashboardModule.objects.filter(
app_label=self.app_label,
user=self.context['request'].user.pk
).all()
if len(module_models) == 0:
module_models = self.create_initial_module_models(self.context['request'].user)
loaded_modules = []
for module_model in module_models:
module_cls = module_model.load_module()
if module_cls is not None:
module = module_cls(model=module_model, context=self.context)
loaded_modules.append(module)
self.modules = loaded_modules
def render(self):
context = context_to_dict(self.context)
context.update({
'columns': range(self.columns),
'modules': self.modules,
'app_label': self.app_label,
})
context.update(csrf(context['request']))
return render_to_string('jet.dashboard/dashboard.html', context)
def render_tools(self):
context = context_to_dict(self.context)
context.update({
'children': self.children,
'app_label': self.app_label,
'available_children': self.available_children
})
context.update(csrf(context['request']))
return render_to_string('jet.dashboard/dashboard_tools.html', context)
def media(self):
unique_css = OrderedSet()
unique_js = OrderedSet()
for js in getattr(self.Media, 'js', ()):
unique_js.add(js)
for css in getattr(self.Media, 'css', ()):
unique_css.add(css)
for module in self.modules:
for js in getattr(module.Media, 'js', ()):
unique_js.add(js)
for css in getattr(module.Media, 'css', ()):
unique_css.add(css)
class Media:
css = list(unique_css)
js = list(unique_js)
return Media
class AppIndexDashboard(Dashboard):
def get_app_content_types(self):
return self.app_label + '.*',
def models(self):
return self.app_label + '.*',
class DefaultIndexDashboard(Dashboard):
columns = 3
def init_with_context(self, context):
self.available_children.append(modules.LinkList)
self.available_children.append(modules.Feed)
site_name = get_admin_site_name(context)
# append a link list module for "quick links"
self.children.append(modules.LinkList(
_('Quick links'),
layout='inline',
draggable=False,
deletable=False,
collapsible=False,
children=[
[_('Return to site'), '/'],
[_('Change password'),
reverse('%s:password_change' % site_name)],
[_('Log out'), reverse('%s:logout' % site_name)],
],
column=0,
order=0
))
# append an app list module for "Applications"
self.children.append(modules.AppList(
_('Applications'),
exclude=('auth.*',),
column=1,
order=0
))
# append an app list module for "Administration"
self.children.append(modules.AppList(
_('Administration'),
models=('auth.*',),
column=2,
order=0
))
# append a recent actions module
self.children.append(modules.RecentActions(
_('Recent Actions'),
10,
column=0,
order=1
))
# append a feed module
self.children.append(modules.Feed(
_('Latest Django News'),
feed_url='http://www.djangoproject.com/rss/weblog/',
limit=5,
column=1,
order=1
))
# append another link list module for "support".
self.children.append(modules.LinkList(
_('Support'),
children=[
{
'title': _('Django documentation'),
'url': 'http://docs.djangoproject.com/',
'external': True,
},
{
'title': _('Django "django-users" mailing list'),
'url': 'http://groups.google.com/group/django-users',
'external': True,
},
{
'title': _('Django irc channel'),
'url': 'irc://irc.freenode.net/django',
'external': True,
},
],
column=2,
order=1
))
class DefaultAppIndexDashboard(AppIndexDashboard):
def init_with_context(self, context):
self.available_children.append(modules.LinkList)
self.children.append(modules.ModelList(
title=_('Application models'),
models=self.models(),
column=0,
order=0
))
self.children.append(modules.RecentActions(
include_list=self.get_app_content_types(),
column=1,
order=0
))
class DashboardUrls(object):
_urls = []
def get_urls(self):
return self._urls
def register_url(self, url):
self._urls.append(url)
def register_urls(self, urls):
self._urls.extend(urls)
urls = DashboardUrls()
|
import binascii
import os
import subprocess
import sys
from migen.fhdl import *
from litex.soc.interconnect.csr import *
def git_root():
if sys.platform == "win32":
# Git on Windows is likely to use Unix-style paths (`/c/path/to/repo`),
# whereas directories passed to Python should be Windows-style paths
# (`C:/path/to/repo`) (because Python calls into the Windows API).
# `cygpath` converts between the two.
git = subprocess.Popen(
"git rev-parse --show-toplevel",
cwd=os.path.dirname(__file__),
stdout=subprocess.PIPE,
)
path = subprocess.check_output(
"cygpath -wf -",
stdin=git.stdout,
)
git.wait()
return path.decode('ascii').strip()
else:
return subprocess.check_output(
"git rev-parse --show-toplevel",
shell=True,
cwd=os.path.dirname(__file__),
).decode('ascii').strip()
def git_commit():
data = subprocess.check_output(
"git rev-parse HEAD",
shell=True,
cwd=git_root(),
).decode('ascii').strip()
return binascii.unhexlify(data)
def git_describe():
return subprocess.check_output(
"git describe --dirty",
shell=True,
cwd=git_root(),
).decode('ascii').strip()
def git_status():
return subprocess.check_output(
"git status --short",
shell=True,
cwd=git_root(),
).decode('ascii').strip()
class GitInfo(Module, AutoCSR):
def __init__(self):
commit = sum(int(x) << (i*8) for i, x in enumerate(reversed(git_commit())))
self.commit = CSRStatus(160)
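        # Brief sketch of the packing above (hypothetical digest): git_commit()
        # returns the 20-byte SHA-1; reversing it before the byte-wise shifts puts
        # the first byte of the digest into the most significant byte of the
        # 160-bit value, so the CSR reads back as the hex digest in order.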
# FIXME: This should be a read-only Memory object
#extradata = [ord(x) for x in "\0".join([
# "https://github.com/timvideos/HDMI2USB-misoc-firmware.git",
# git_describe(),
# git_status(),
# "",
# ])]
#self.extradata = CSRStatus(len(extradata)*8)
self.comb += [
self.commit.status.eq(commit),
# self.extradata.status.eq(extradata),
]
|
import os
from collections import OrderedDict
from copy import copy
from conans.errors import ConanException
from conans.util.conan_v2_mode import conan_v2_behavior
DEFAULT_INCLUDE = "include"
DEFAULT_LIB = "lib"
DEFAULT_BIN = "bin"
DEFAULT_RES = "res"
DEFAULT_SHARE = "share"
DEFAULT_BUILD = ""
DEFAULT_FRAMEWORK = "Frameworks"
COMPONENT_SCOPE = "::"
class DefaultOrderedDict(OrderedDict):
def __init__(self, factory):
self.factory = factory
super(DefaultOrderedDict, self).__init__()
def __getitem__(self, key):
if key not in self.keys():
super(DefaultOrderedDict, self).__setitem__(key, self.factory())
super(DefaultOrderedDict, self).__getitem__(key).name = key
return super(DefaultOrderedDict, self).__getitem__(key)
def __copy__(self):
the_copy = DefaultOrderedDict(self.factory)
for key, value in super(DefaultOrderedDict, self).items():
the_copy[key] = value
return the_copy
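# A minimal usage sketch of DefaultOrderedDict (illustrative, not part of the
# public API): accessing a missing key builds a value with the factory and
# stamps the key onto it as its name, preserving insertion order.
#     components = DefaultOrderedDict(lambda: Component("/some/rootpath"))
#     components["zlib"].libs.append("z")   # "zlib" entry created on first access
#     list(components.keys())               # -> ["zlib"]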
class _CppInfo(object):
""" Object that stores all the necessary information to build in C/C++.
    It is intended to be system independent; translations to
    specific systems will be produced from this info
"""
def __init__(self):
self._name = None
self.names = {}
self.system_libs = [] # Ordered list of system libraries
self.includedirs = [] # Ordered list of include paths
self.srcdirs = [] # Ordered list of source paths
self.libdirs = [] # Directories to find libraries
self.resdirs = [] # Directories to find resources, data, etc
self.bindirs = [] # Directories to find executables and shared libs
self.builddirs = []
self.frameworks = [] # Macos .framework
self.frameworkdirs = []
self.rootpaths = []
self.libs = [] # The libs to link against
self.defines = [] # preprocessor definitions
self.cflags = [] # pure C flags
self.cxxflags = [] # C++ compilation flags
self.sharedlinkflags = [] # linker flags
self.exelinkflags = [] # linker flags
self.build_modules = []
self.filenames = {} # name of filename to create for various generators
self.rootpath = ""
self.sysroot = ""
self._build_modules_paths = None
self._include_paths = None
self._lib_paths = None
self._bin_paths = None
self._build_paths = None
self._res_paths = None
self._src_paths = None
self._framework_paths = None
self.version = None # Version of the conan package
self.description = None # Description of the conan package
# When package is editable, filter_empty=False, so empty dirs are maintained
self.filter_empty = True
def _filter_paths(self, paths):
abs_paths = [os.path.join(self.rootpath, p)
if not os.path.isabs(p) else p for p in paths]
if self.filter_empty:
return [p for p in abs_paths if os.path.isdir(p)]
else:
return abs_paths
@property
def build_modules_paths(self):
if self._build_modules_paths is None:
self._build_modules_paths = [os.path.join(self.rootpath, p) if not os.path.isabs(p)
else p for p in self.build_modules]
return self._build_modules_paths
@property
def include_paths(self):
if self._include_paths is None:
self._include_paths = self._filter_paths(self.includedirs)
return self._include_paths
@property
def lib_paths(self):
if self._lib_paths is None:
self._lib_paths = self._filter_paths(self.libdirs)
return self._lib_paths
@property
def src_paths(self):
if self._src_paths is None:
self._src_paths = self._filter_paths(self.srcdirs)
return self._src_paths
@property
def bin_paths(self):
if self._bin_paths is None:
self._bin_paths = self._filter_paths(self.bindirs)
return self._bin_paths
@property
def build_paths(self):
if self._build_paths is None:
self._build_paths = self._filter_paths(self.builddirs)
return self._build_paths
@property
def res_paths(self):
if self._res_paths is None:
self._res_paths = self._filter_paths(self.resdirs)
return self._res_paths
@property
def framework_paths(self):
if self._framework_paths is None:
self._framework_paths = self._filter_paths(self.frameworkdirs)
return self._framework_paths
@property
def name(self):
conan_v2_behavior("Use 'get_name(generator)' instead")
return self._name
@name.setter
def name(self, value):
self._name = value
def get_name(self, generator):
return self.names.get(generator, self._name)
def get_filename(self, generator):
result = self.filenames.get(generator)
if result:
return result
return self.get_name(generator)
# Compatibility for 'cppflags' (old style property to allow decoration)
def get_cppflags(self):
conan_v2_behavior("'cpp_info.cppflags' is deprecated, use 'cxxflags' instead")
return self.cxxflags
def set_cppflags(self, value):
conan_v2_behavior("'cpp_info.cppflags' is deprecated, use 'cxxflags' instead")
self.cxxflags = value
cppflags = property(get_cppflags, set_cppflags)
class Component(_CppInfo):
def __init__(self, rootpath):
super(Component, self).__init__()
self.rootpath = rootpath
self.includedirs.append(DEFAULT_INCLUDE)
self.libdirs.append(DEFAULT_LIB)
self.bindirs.append(DEFAULT_BIN)
self.resdirs.append(DEFAULT_RES)
self.builddirs.append(DEFAULT_BUILD)
self.frameworkdirs.append(DEFAULT_FRAMEWORK)
self.requires = []
class CppInfo(_CppInfo):
""" Build Information declared to be used by the CONSUMERS of a
    conan package. That means that consumers must use these flags and configs in order
    to build properly.
    Defined in the user CONANFILE; directories are relative at user definition time
"""
def __init__(self, ref_name, root_folder):
super(CppInfo, self).__init__()
self._ref_name = ref_name
self._name = ref_name
        self.rootpath = root_folder  # the full path of the package in which the conan package is found
self.includedirs.append(DEFAULT_INCLUDE)
self.libdirs.append(DEFAULT_LIB)
self.bindirs.append(DEFAULT_BIN)
self.resdirs.append(DEFAULT_RES)
self.builddirs.append(DEFAULT_BUILD)
self.frameworkdirs.append(DEFAULT_FRAMEWORK)
self.components = DefaultOrderedDict(lambda: Component(self.rootpath))
# public_deps is needed to accumulate list of deps for cmake targets
self.public_deps = []
self._configs = {}
def __str__(self):
return self._ref_name
def get_name(self, generator):
name = super(CppInfo, self).get_name(generator)
# Legacy logic for pkg_config generator
from conans.client.generators.pkg_config import PkgConfigGenerator
if generator == PkgConfigGenerator.name:
fallback = self._name.lower() if self._name != self._ref_name else self._ref_name
if PkgConfigGenerator.name not in self.names and self._name != self._name.lower():
conan_v2_behavior("Generated file and name for {gen} generator will change in"
" Conan v2 to '{name}'. Use 'self.cpp_info.names[\"{gen}\"]"
" = \"{fallback}\"' in your recipe to continue using current name."
.format(gen=PkgConfigGenerator.name, name=name, fallback=fallback))
name = self.names.get(generator, fallback)
return name
@property
def configs(self):
return self._configs
def __getattr__(self, config):
def _get_cpp_info():
result = _CppInfo()
result.filter_empty = self.filter_empty
result.rootpath = self.rootpath
result.sysroot = self.sysroot
result.includedirs.append(DEFAULT_INCLUDE)
result.libdirs.append(DEFAULT_LIB)
result.bindirs.append(DEFAULT_BIN)
result.resdirs.append(DEFAULT_RES)
result.builddirs.append(DEFAULT_BUILD)
result.frameworkdirs.append(DEFAULT_FRAMEWORK)
return result
return self._configs.setdefault(config, _get_cpp_info())
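    # Hedged illustration of the config access above (attribute name is just an
    # example): reading an attribute that is not otherwise defined, e.g.
    #     self.cpp_info.debug.libs.append("mylib_d")
    # lazily creates and caches a per-config _CppInfo under self._configs["debug"].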
def _raise_incorrect_components_definition(self, package_name, package_requires):
# Raise if mixing components
if (self.includedirs != [DEFAULT_INCLUDE] or
self.libdirs != [DEFAULT_LIB] or
self.bindirs != [DEFAULT_BIN] or
self.resdirs != [DEFAULT_RES] or
self.builddirs != [DEFAULT_BUILD] or
self.frameworkdirs != [DEFAULT_FRAMEWORK] or
self.libs or
self.system_libs or
self.frameworks or
self.defines or
self.cflags or
self.cxxflags or
self.sharedlinkflags or
self.exelinkflags or
self.build_modules) and self.components:
raise ConanException("self.cpp_info.components cannot be used with self.cpp_info "
"global values at the same time")
if self._configs and self.components:
raise ConanException("self.cpp_info.components cannot be used with self.cpp_info configs"
" (release/debug/...) at the same time")
# Raise on component name
for comp_name, comp in self.components.items():
if comp_name == package_name:
raise ConanException("Component name cannot be the same as the package name: '%s'"
% comp_name)
if self.components:
comp_requires = set()
for comp_name, comp in self.components.items():
for comp_require in comp.requires:
if COMPONENT_SCOPE in comp_require:
comp_requires.add(
comp_require[:comp_require.find(COMPONENT_SCOPE)])
pkg_requires = [require.ref.name for require in package_requires.values()]
# Raise on components requires without package requires
for pkg_require in pkg_requires:
if pkg_require not in comp_requires:
raise ConanException("Package require '%s' not used in components requires"
% pkg_require)
            # Raise on components requires referencing nonexistent package requires
for comp_require in comp_requires:
if comp_require not in pkg_requires:
raise ConanException("Package require '%s' declared in components requires "
"but not defined as a recipe requirement" % comp_require)
class _BaseDepsCppInfo(_CppInfo):
def __init__(self):
super(_BaseDepsCppInfo, self).__init__()
def update(self, dep_cpp_info):
def merge_lists(seq1, seq2):
return [s for s in seq1 if s not in seq2] + seq2
self.system_libs = merge_lists(self.system_libs, dep_cpp_info.system_libs)
self.includedirs = merge_lists(self.includedirs, dep_cpp_info.include_paths)
self.srcdirs = merge_lists(self.srcdirs, dep_cpp_info.src_paths)
self.libdirs = merge_lists(self.libdirs, dep_cpp_info.lib_paths)
self.bindirs = merge_lists(self.bindirs, dep_cpp_info.bin_paths)
self.resdirs = merge_lists(self.resdirs, dep_cpp_info.res_paths)
self.builddirs = merge_lists(self.builddirs, dep_cpp_info.build_paths)
self.frameworkdirs = merge_lists(self.frameworkdirs, dep_cpp_info.framework_paths)
self.libs = merge_lists(self.libs, dep_cpp_info.libs)
self.frameworks = merge_lists(self.frameworks, dep_cpp_info.frameworks)
self.build_modules = merge_lists(self.build_modules, dep_cpp_info.build_modules_paths)
self.rootpaths.append(dep_cpp_info.rootpath)
# Note these are in reverse order
self.defines = merge_lists(dep_cpp_info.defines, self.defines)
self.cxxflags = merge_lists(dep_cpp_info.cxxflags, self.cxxflags)
self.cflags = merge_lists(dep_cpp_info.cflags, self.cflags)
self.sharedlinkflags = merge_lists(dep_cpp_info.sharedlinkflags, self.sharedlinkflags)
self.exelinkflags = merge_lists(dep_cpp_info.exelinkflags, self.exelinkflags)
if not self.sysroot:
self.sysroot = dep_cpp_info.sysroot
@property
def build_modules_paths(self):
return self.build_modules
@property
def include_paths(self):
return self.includedirs
@property
def lib_paths(self):
return self.libdirs
@property
def src_paths(self):
return self.srcdirs
@property
def bin_paths(self):
return self.bindirs
@property
def build_paths(self):
return self.builddirs
@property
def res_paths(self):
return self.resdirs
@property
def framework_paths(self):
return self.frameworkdirs
class DepCppInfo(object):
def __init__(self, cpp_info):
self._cpp_info = cpp_info
self._libs = None
self._system_libs = None
self._frameworks = None
self._defines = None
self._cxxflags = None
self._cflags = None
self._sharedlinkflags = None
self._exelinkflags = None
self._include_paths = None
self._lib_paths = None
self._bin_paths = None
self._build_paths = None
self._res_paths = None
self._src_paths = None
self._framework_paths = None
self._build_module_paths = None
self._sorted_components = None
self._check_component_requires()
def __str__(self):
return str(self._cpp_info)
def __getattr__(self, item):
try:
attr = self._cpp_info.__getattribute__(item)
except AttributeError: # item is not defined, get config (CppInfo)
attr = self._cpp_info.__getattr__(item)
return attr
@staticmethod
def _merge_lists(seq1, seq2):
return seq1 + [s for s in seq2 if s not in seq1]
def _aggregated_values(self, item):
values = getattr(self, "_%s" % item)
if values is not None:
return values
values = getattr(self._cpp_info, item)
if self._cpp_info.components:
for component in self._get_sorted_components().values():
values = self._merge_lists(values, getattr(component, item))
setattr(self, "_%s" % item, values)
return values
def _aggregated_paths(self, item):
paths = getattr(self, "_%s_paths" % item)
if paths is not None:
return paths
paths = getattr(self._cpp_info, "%s_paths" % item)
if self._cpp_info.components:
for component in self._get_sorted_components().values():
paths = self._merge_lists(paths, getattr(component, "%s_paths" % item))
setattr(self, "_%s_paths" % item, paths)
return paths
@staticmethod
def _filter_component_requires(requires):
return [r for r in requires if COMPONENT_SCOPE not in r]
def _check_component_requires(self):
for comp_name, comp in self._cpp_info.components.items():
if not all([require in self._cpp_info.components for require in
self._filter_component_requires(comp.requires)]):
raise ConanException("Component '%s' declares a missing dependency" % comp_name)
bad_requires = [r for r in comp.requires if r.startswith(COMPONENT_SCOPE)]
if bad_requires:
msg = "Leading character '%s' not allowed in %s requires: %s. Omit it to require " \
"components inside the same package." \
% (COMPONENT_SCOPE, comp_name, bad_requires)
raise ConanException(msg)
def _get_sorted_components(self):
"""
Sort Components from most dependent one first to the less dependent one last
:return: List of sorted components
"""
if not self._sorted_components:
if any([[require for require in self._filter_component_requires(comp.requires)]
for comp in self._cpp_info.components.values()]):
ordered = OrderedDict()
components = copy(self._cpp_info.components)
while len(ordered) != len(self._cpp_info.components):
# Search next element to be processed
for comp_name, comp in components.items():
# Check if component is not required and can be added to ordered
if comp_name not in [require for dep in components.values() for require in
self._filter_component_requires(dep.requires)]:
ordered[comp_name] = comp
del components[comp_name]
break
else:
raise ConanException("There is a dependency loop in "
"'self.cpp_info.components' requires")
self._sorted_components = ordered
else: # If components do not have requirements, keep them in the same order
self._sorted_components = self._cpp_info.components
return self._sorted_components
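    # Worked example of the ordering above (component names are made up): with
    # components {"core": requires [], "util": requires ["core"]}, "util" is not
    # required by anything so it is emitted first, then "core"; i.e. the most
    # dependent component comes first, as the docstring states.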
@property
def build_modules_paths(self):
return self._aggregated_paths("build_modules")
@property
def include_paths(self):
return self._aggregated_paths("include")
@property
def lib_paths(self):
return self._aggregated_paths("lib")
@property
def src_paths(self):
return self._aggregated_paths("src")
@property
def bin_paths(self):
return self._aggregated_paths("bin")
@property
def build_paths(self):
return self._aggregated_paths("build")
@property
def res_paths(self):
return self._aggregated_paths("res")
@property
def framework_paths(self):
return self._aggregated_paths("framework")
@property
def libs(self):
return self._aggregated_values("libs")
@property
def system_libs(self):
return self._aggregated_values("system_libs")
@property
def frameworks(self):
return self._aggregated_values("frameworks")
@property
def defines(self):
return self._aggregated_values("defines")
@property
def cxxflags(self):
return self._aggregated_values("cxxflags")
@property
def cflags(self):
return self._aggregated_values("cflags")
@property
def sharedlinkflags(self):
return self._aggregated_values("sharedlinkflags")
@property
def exelinkflags(self):
return self._aggregated_values("exelinkflags")
class DepsCppInfo(_BaseDepsCppInfo):
""" Build Information necessary to build a given conans. It contains the
flags, directories and options if its dependencies. The conans CONANFILE
should use these flags to pass them to the underlaying build system (Cmake, make),
so deps info is managed
"""
def __init__(self):
super(DepsCppInfo, self).__init__()
self._dependencies = OrderedDict()
self._configs = {}
def __getattr__(self, config):
return self._configs.setdefault(config, _BaseDepsCppInfo())
@property
def configs(self):
return self._configs
@property
def dependencies(self):
return self._dependencies.items()
@property
def deps(self):
return self._dependencies.keys()
def __getitem__(self, item):
return self._dependencies[item]
def add(self, pkg_name, cpp_info):
assert pkg_name == str(cpp_info), "'{}' != '{}'".format(pkg_name, cpp_info)
assert isinstance(cpp_info, (CppInfo, DepCppInfo))
self._dependencies[pkg_name] = cpp_info
super(DepsCppInfo, self).update(cpp_info)
for config, cpp_info in cpp_info.configs.items():
self._configs.setdefault(config, _BaseDepsCppInfo()).update(cpp_info)
|
# Copyright (c) 2017-2021, Lawrence Livermore National Security, LLC and
# other Shroud Project Developers.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (BSD-3-Clause)
# #######################################################################
#
# Test Python API generated from references.yaml.
#
from __future__ import print_function
import numpy as np
import unittest
import arrayclass
class Arrayclass(unittest.TestCase):
"""Test struct problem"""
def XXsetUp(self):
""" Setting up for the test """
print("FooTest:setUp_:begin")
## do something...
print("FooTest:setUp_:end")
def XXtearDown(self):
"""Cleaning up after the test"""
print("FooTest:tearDown_:begin")
## do something...
print("FooTest:tearDown_:end")
def test_ArrayWrapper(self):
arrinst = arrayclass.ArrayWrapper()
arrinst.setSize(10)
self.assertEqual(10, arrinst.getSize())
isize = arrinst.fillSize()
self.assertEqual(10, isize)
arrinst.allocate()
arr = arrinst.getArray()
self.assertIsInstance(arr, np.ndarray)
self.assertEqual('float64', arr.dtype.name)
self.assertEqual(1, arr.ndim)
self.assertEqual((10,), arr.shape)
self.assertEqual(10, arr.size)
# Make sure we're pointing to the array in the instance.
arr[:] = 0.0
self.assertEqual(0.0, arrinst.sumArray())
arr[:] = 1.0
self.assertEqual(10.0, arrinst.sumArray())
arr[:] = 0.0
arr[0] = 10.0
arr[9] = 1.0
self.assertEqual(11.0, arrinst.sumArray())
arrconst = arrinst.getArrayConst()
self.assertIsInstance(arrconst, np.ndarray)
self.assertEqual('float64', arrconst.dtype.name)
self.assertEqual(1, arrconst.ndim)
self.assertEqual((10,), arrconst.shape)
self.assertEqual(10, arrconst.size)
# Both getArray and getArrayConst return a NumPy array to the
# same pointer. But a new array is created each time.
self.assertIsNot(arr, arrconst)
arr3 = arrinst.getArrayC()
self.assertIsInstance(arr3, np.ndarray)
self.assertEqual('float64', arr3.dtype.name)
self.assertEqual(1, arr3.ndim)
self.assertEqual((10,), arr3.shape)
self.assertEqual(10, arr3.size)
arr4 = arrinst.getArrayConstC()
self.assertIsInstance(arr4, np.ndarray)
self.assertEqual('float64', arr4.dtype.name)
self.assertEqual(1, arr4.ndim)
self.assertEqual((10,), arr4.shape)
self.assertEqual(10, arr4.size)
arr5 = arrinst.fetchArrayPtr()
        self.assertIsInstance(arr5, np.ndarray)
self.assertEqual('float64', arr5.dtype.name)
self.assertEqual(1, arr5.ndim)
self.assertEqual((10,), arr5.shape)
self.assertEqual(10, arr5.size)
arr6 = arrinst.fetchArrayRef()
        self.assertIsInstance(arr6, np.ndarray)
self.assertEqual('float64', arr6.dtype.name)
self.assertEqual(1, arr6.ndim)
self.assertEqual((10,), arr6.shape)
self.assertEqual(10, arr6.size)
arr7 = arrinst.fetchArrayPtrConst()
        self.assertIsInstance(arr7, np.ndarray)
self.assertEqual('float64', arr7.dtype.name)
self.assertEqual(1, arr7.ndim)
self.assertEqual((10,), arr7.shape)
self.assertEqual(10, arr7.size)
arr8 = arrinst.fetchArrayRefConst()
        self.assertIsInstance(arr8, np.ndarray)
self.assertEqual('float64', arr8.dtype.name)
self.assertEqual(1, arr8.ndim)
self.assertEqual((10,), arr8.shape)
self.assertEqual(10, arr8.size)
with self.assertRaises(ValueError) as context:
arrinst.checkPtr(None)
self.assertTrue("called with invalid PyCapsule object"
in str(context.exception))
voidptr = arrinst.fetchVoidPtr()
self.assertEqual('PyCapsule', voidptr.__class__.__name__)
self.assertTrue(arrinst.checkPtr(voidptr))
voidptr = arrinst.fetchVoidRef()
self.assertEqual('PyCapsule', voidptr.__class__.__name__)
self.assertTrue(arrinst.checkPtr(voidptr))
# creating a new test suite
newSuite = unittest.TestSuite()
# adding a test case
newSuite.addTest(unittest.makeSuite(Arrayclass))
if __name__ == "__main__":
unittest.main()
|
"""
Provide help message for command line interface commands.
"""
PROVIDER_NAME_HELP = 'The name of a provider of hosting for software development and version control.'
ORGANIZATION_NAME_HELP = "The provider's organization name."
REPOSITORY_NAME_HELP = "The provider's repository name."
BRANCH = 'A branch.'
BASE_BRANCH = 'A branch to compare a project version with. Usually, a default branch.'
HEAD_BRANCH = 'A branch to get its project version for comparison. Usually, a feature branch.'
PROJECT_VERSION = 'A project version.'
|
import numpy as np
list_a = []
for i in range(2):
for j in range(5):
list_a.append(i)
list_a = np.random.permutation(list_a)
print('class labels')
print(list_a)
list_a = np.array(list_a)
index_i = 0
classid_of_index0=list_a[index_i]
print('class_of_index0: ', classid_of_index0)
classid_of_index0_locations = np.where(list_a == classid_of_index0)
classid_of_index0_locations = classid_of_index0_locations[0]
print('class_of_index0_locations', classid_of_index0_locations)
print(classid_of_index0_locations != index_i)
same_index_list = classid_of_index0_locations[classid_of_index0_locations != index_i]
print(same_index_list)
print(same_index_list[0:2])
num_tokens_vec = [5,6,7,5,4,3,5,4,6,7]
for pos in same_index_list[0:2]:
print(num_tokens_vec[pos])
max_val = tuple(num_tokens_vec[pos] for pos in same_index_list[0:2])
max_val1 = max(max_val)
print(max_val)
print(max_val1)
|
import vtk
from vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
import math
import numpy as np
import numpy.matlib
import os
import json
import cv2
# Z
# /
# /
# /
# ---------- X
# |
# |
# |
# Y
class vtkRenderer():
def __init__(self, widget=None):
self.ren = vtk.vtkRenderer()
if widget is not None:
# Qt Widget Mode
self.qtwidget_mode = True
#### Init
# self.vtkWidget = QVTKRenderWindowInteractor(self.centralwidget)
# self.vtkWidget.setGeometry(0,0,200,200)
# self.vtkRenderer = calipy.vtkRenderer(self.vtkWidget)
# Qt Widget
self.vtkWidget = widget
self.vtkWidget.GetRenderWindow().AddRenderer(self.ren)
self.iren = self.vtkWidget.GetRenderWindow().GetInteractor()
self.iren.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
self.iren.Initialize()
self.iren.Start()
else:
# Window Mode
self.qtwidget_mode = False
# Make empty window
self.renWin = vtk.vtkRenderWindow()
self.renWin.AddRenderer(self.ren)
self.renWin.SetSize(960, 540)
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
self.iren.SetRenderWindow(self.renWin)
self.iren.Initialize()
self.ren.SetBackground(0, 0.1, 0)
self.actor_list = {}
axes = vtk.vtkAxesActor()
self.ren.AddActor(axes)
self.actor_list["axes"] = axes
self.ren.ResetCamera()
self.iren.AddObserver('LeftButtonPressEvent', self.pushLeftButtonPressEventOnVTK, 1.0)
# Add Event for get Position
def pushLeftButtonPressEventOnVTK(self, obj, ev):
clickPos = self.iren.GetEventPosition()
#print(clickPos)
picker = vtk.vtkPropPicker()
picker.Pick(clickPos[0], clickPos[1], 0, self.ren)
print(picker.GetPickPosition())
def setMainCamera(self, R = np.eye(3), t = np.zeros((3,1)), fov = 80):
camera = vtk.vtkCamera()
camera.SetPosition(t[0,0],t[1,0],t[2,0])
#camera.SetFocalPoint(0,1,0)
focalpoint = np.array([[0],[0],[1]])
focalpoint = np.dot(R,focalpoint) + t
camera.SetFocalPoint(focalpoint[0],focalpoint[1],focalpoint[2])
ref = np.array([[0],[-1],[0]])
cam_up = np.dot(R, ref)
#camera.SetPosition(0,1,0)
#camera.SetViewUp(0,1,0)
camera.SetViewUp(cam_up[0],cam_up[1],cam_up[2])
camera.SetViewAngle(fov)
self.ren.SetActiveCamera(camera)
def setMainCameraToSeeTarget(self, t = np.zeros((3,1)), target = np.zeros((3,1)), fov = 80):
camera = vtk.vtkCamera()
camera.SetPosition(t[0,0],t[1,0],t[2,0])
#print("Position :", t)
#camera.SetFocalPoint(0,1,0)
#focalpoint = np.array([[0],[0],[1]])
#focalpoint = np.dot(R,focalpoint) + t
target_focalpoint = (target - t).ravel()
#print(target_focalpoint)
target_focalpoint = target_focalpoint / np.linalg.norm(target_focalpoint)
#print("focalpoint", target)
camera.SetFocalPoint(target[0],target[1],target[2])
ref = np.array([[0],[-1],[0]]).ravel()
#print(focalpoint, ref)
ref_right = np.cross(target_focalpoint, ref)
ref_right = ref_right / np.linalg.norm(ref_right)
#print(ref_right, focalpoint)
cam_up = np.cross(ref_right, target_focalpoint)
cam_up = cam_up / np.linalg.norm(cam_up)
print("Up",cam_up)
#cam_up = np.dot(R, ref)
#camera.SetPosition(0,1,0)
#camera.SetViewUp(0,1,0)
camera.SetViewUp(cam_up[0],cam_up[1],cam_up[2])
camera.SetViewAngle(fov)
self.ren.SetActiveCamera(camera)
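        # Sketch of the up-vector math above (same reference vector as the code):
        # with view = normalize(target - position) and ref = (0, -1, 0),
        # right = view x ref and up = right x view give an orthonormal camera
        # frame, keeping the camera's up as close to the reference direction as
        # possible while staying orthogonal to the view direction.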
def getActorList(self):
return self.actor_list.keys()
def removeActorByName(self, name):
#print(self.actor_list)
if name in self.actor_list.keys():
actor = self.actor_list.pop(name)
self.ren.RemoveActor(actor)
#print("remove! ", name)
def addText(self, name, text, pos_x, pos_y):
self.removeActorByName(name)
textActor = vtk.vtkTextActor()
textActor.SetInput( text )
textActor.SetPosition( pos_x, pos_y )
textActor.GetTextProperty().SetFontSize ( 50 )
textActor.GetTextProperty().SetColor ( 1.0, 1.0, 1.0 )
self.ren.AddActor2D(textActor)
self.actor_list[name] = textActor
def addPlane(self, name, point1, point2, point3, color=np.array([255.0,255.0,255.0]), opacity=1.0):
self.removeActorByName(name)
# Create a plane
planeSource = vtk.vtkPlaneSource()
# planeSource.SetOrigin(center_point[0], center_point[1], center_point[2])
# #planeSource.SetNormal(normal_vector[0], normal_vector[1], normal_vector[2])
# #print(dir(planeSource))
# planeSource.SetPoint1(top_left_point[0], top_left_point[1], top_left_point[2])
# planeSource.SetPoint2(bot_right_point[0], bot_right_point[1], bot_right_point[2])
# planeSource.SetXResolution(10)
# planeSource.SetYResolution(340)
planeSource.SetOrigin(point1[0], point1[1], point1[2])
planeSource.SetPoint1(point2[0], point2[1], point2[2])
planeSource.SetPoint2(point3[0], point3[1], point3[2])
planeSource.SetXResolution(10)
planeSource.SetYResolution(340)
planeSource.Update()
plane = planeSource.GetOutput()
# Create a mapper and actor
polygonMapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
            polygonMapper.SetInputConnection(plane.GetProducerPort())
else:
polygonMapper.SetInputData(plane)
polygonMapper.Update()
polygonActor = vtk.vtkActor()
polygonActor.SetMapper(polygonMapper)
polygonActor.GetProperty().SetColor([color[0],color[1],color[2]])
polygonActor.GetProperty().SetOpacity(opacity)
#actor.GetProperty().SetColor(colors->GetColor3d("Cyan").GetData());
self.ren.AddActor(polygonActor)
self.actor_list[name] = polygonActor
def addPlanWithTexture(self, name, point1, point2, point3, path, opacity=1.0):
self.removeActorByName(name)
#png_file = vtk.vtkPNGReader()
#print(png_file.CanReadFile(path))
# Read the image which will be the texture
#vtkSmartPointer<vtkJPEGReader> jPEGReader = vtkSmartPointer<vtkJPEGReader>::New();
#jPEGReader->SetFileName ( inputFilename.c_str() );
img = vtk.vtkJPEGReader()
img.SetFileName(path)
#print(img.CanReadFile(path))
#print(path)
# Create a plane
#vtkSmartPointer<vtkPlaneSource> plane = vtkSmartPointer<vtkPlaneSource>::New();
#plane->SetCenter(0.0, 0.0, 0.0);
#plane->SetNormal(0.0, 0.0, 1.0);
plane = vtk.vtkPlaneSource()
# planeSource.SetOrigin(center_point[0], center_point[1], center_point[2])
# #planeSource.SetNormal(normal_vector[0], normal_vector[1], normal_vector[2])
# #print(dir(planeSource))
# planeSource.SetPoint1(top_left_point[0], top_left_point[1], top_left_point[2])
# planeSource.SetPoint2(bot_right_point[0], bot_right_point[1], bot_right_point[2])
# planeSource.SetXResolution(10)
# planeSource.SetYResolution(340)
#plane.SetCenter(0.0,0.0,0.0)
#plane.SetNormal(0.0,0.0,1.0)
plane.SetOrigin(point1[0], point1[1], point1[2])
plane.SetPoint1(point2[0], point2[1], point2[2])
plane.SetPoint2(point3[0], point3[1], point3[2])
plane.SetXResolution(1920)
plane.SetYResolution(1080)
# Apply the texture
#vtkSmartPointer<vtkTexture> texture = vtkSmartPointer<vtkTexture>::New();
#texture->SetInputConnection(jPEGReader->GetOutputPort());
texture = vtk.vtkTexture()
texture.SetInputConnection(img.GetOutputPort())
#vtkSmartPointer<vtkTextureMapToPlane> texturePlane = vtkSmartPointer<vtkTextureMapToPlane>::New();
#texturePlane->SetInputConnection(plane->GetOutputPort());
texturePlane = vtk.vtkTextureMapToPlane()
texturePlane.SetInputConnection(plane.GetOutputPort())
#planeSource.Update()
#plane = planeSource.GetOutput()
#vtkSmartPointer<vtkPolyDataMapper> planeMapper = vtkSmartPointer<vtkPolyDataMapper>::New();
#planeMapper->SetInputConnection(texturePlane->GetOutputPort());
planeMapper = vtk.vtkPolyDataMapper()
planeMapper.SetInputConnection(texturePlane.GetOutputPort())
#vtkSmartPointer<vtkActor> texturedPlane = vtkSmartPointer<vtkActor>::New();
#texturedPlane->SetMapper(planeMapper);
#texturedPlane->SetTexture(texture);
texturedPlane = vtk.vtkActor()
texturedPlane.SetMapper(planeMapper)
texturedPlane.SetTexture(texture)
# Create a mapper and actor
#polygonMapper = vtk.vtkPolyDataMapper()
#if vtk.VTK_MAJOR_VERSION <= 5:
# polygonMapper.SetInputConnection(texturePlane.GetProducePort())
#else:
# polygonMapper.SetInputData(texturePlane.GetOutput())
# polygonMapper.Update()
#polygonActor = vtk.vtkActor()
#polygonActor.SetMapper(polygonMapper)
#polygonActor.SetTexture(texture)
#polygonActor.GetProperty().SetColor([color[0],color[1],color[2]])
#polygonActor.GetProperty().SetOpacity(opacity)
#actor.GetProperty().SetColor(colors->GetColor3d("Cyan").GetData());
self.ren.AddActor(texturedPlane)
self.actor_list[name] = texturedPlane
    def addLines(self, name, points, idx_list = None, line_width = 1, color=np.array([255.0,255.0,255.0])):  # points => numpy array of shape (n, 3)
self.removeActorByName(name)
vtkpoints = vtk.vtkPoints()
vtklines = vtk.vtkCellArray()
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(3)
points_size = points.shape[0]
vtkpoints.SetNumberOfPoints(points_size)
for idx, point in enumerate(points):
vtkpoints.SetPoint(idx, point[0], point[1], point[2])
colors.InsertNextTuple(color)
colors.SetName(name+"_colors")
if idx_list is None:
vtklines.InsertNextCell(points_size)
for idx in range(points_size):
vtklines.InsertCellPoint(idx)
else:
vtklines.InsertNextCell(len(idx_list))
for idx in idx_list:
vtklines.InsertCellPoint(idx)
polygon = vtk.vtkPolyData()
polygon.SetPoints(vtkpoints)
polygon.SetLines(vtklines)
polygon.GetCellData().SetScalars(colors)
polygonMapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
polygonMapper.SetInputConnection(polygon.GetProducerPort())
else:
polygonMapper.SetInputData(polygon)
polygonMapper.Update()
polygonActor = vtk.vtkActor()
polygonActor.SetMapper(polygonMapper)
polygonActor.GetProperty().SetLineWidth(line_width)
self.ren.AddActor(polygonActor)
self.actor_list[name] = polygonActor
def addCamera(self, name, R = np.eye(3), t = np.zeros((3,1)), cs = 0.1, line_width = 2, color=np.array([255,255,255])):
self.removeActorByName(name)
camera_points = np.zeros((12,3))
camera_points[0,:] = np.array([-cs/2, -cs/2, cs])
camera_points[1] = np.array([ cs/2, -cs/2, cs])
camera_points[2] = np.array([-cs/2, cs/2, cs])
camera_points[3] = np.array([ cs/2, cs/2, cs])
camera_points[4] = np.array([-cs/4, -cs/4, cs/2])
camera_points[5] = np.array([ cs/4, -cs/4, cs/2])
camera_points[6] = np.array([-cs/4, cs/4, cs/2])
camera_points[7] = np.array([ cs/4, cs/4, cs/2])
camera_points[8] = np.array([-cs/4, -cs/4, 0])
camera_points[9] = np.array([ cs/4, -cs/4, 0])
camera_points[10] = np.array([-cs/4, cs/4, 0])
camera_points[11] = np.array([ cs/4, cs/4, 0])
camera_points = np.transpose(camera_points)
camera_points = np.dot(R, camera_points) + np.matlib.repmat(t, 1, camera_points.shape[1])
camera_points = np.transpose(camera_points)
points = vtk.vtkPoints()
points.SetNumberOfPoints(12)
colors = vtk.vtkUnsignedCharArray()
points.SetNumberOfPoints(12)
colors.SetNumberOfComponents(3)
for idx, point in enumerate(camera_points):
points.SetPoint(idx, point[0], point[1], point[2])
colors.InsertNextTuple(color)
colors.SetName(name+"_colors")
lines = vtk.vtkCellArray()
lines.InsertNextCell(24)
lines.InsertCellPoint(0)
lines.InsertCellPoint(1)
lines.InsertCellPoint(3)
lines.InsertCellPoint(2)
lines.InsertCellPoint(0)
lines.InsertCellPoint(4)
lines.InsertCellPoint(5)
lines.InsertCellPoint(7)
lines.InsertCellPoint(6)
lines.InsertCellPoint(4)
lines.InsertCellPoint(8)
lines.InsertCellPoint(9)
lines.InsertCellPoint(11)
lines.InsertCellPoint(10)
lines.InsertCellPoint(8)
lines.InsertCellPoint(9)
lines.InsertCellPoint(5)
lines.InsertCellPoint(1)
lines.InsertCellPoint(3)
lines.InsertCellPoint(7)
lines.InsertCellPoint(11)
lines.InsertCellPoint(10)
lines.InsertCellPoint(6)
lines.InsertCellPoint(2)
polygon = vtk.vtkPolyData()
polygon.SetPoints(points)
polygon.SetLines(lines)
polygon.GetCellData().SetScalars(colors)
polygonMapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
polygonMapper.SetInputConnection(polygon.GetProducerPort())
else:
polygonMapper.SetInputData(polygon)
polygonMapper.Update()
polygonActor = vtk.vtkActor()
polygonActor.SetMapper(polygonMapper)
polygonActor.GetProperty().SetPointSize(0.1)
polygonActor.GetProperty().SetLineWidth(line_width)
self.ren.AddActor(polygonActor)
self.actor_list[name] = polygonActor
def drawPoints(self, name, point_list, input_color=np.array([[255,0,0]]), point_size = 2):
self.removeActorByName(name)
points = vtk.vtkPoints()
vertices = vtk.vtkCellArray()
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(3)
#colors.SetName("Colors")
#colors.SetNumberOfComponents(3)
if input_color.shape[0] == 1:
color_list = np.ones(point_list.shape) * input_color[0]
else:
color_list = input_color
for point, color in zip(point_list, color_list):
id = points.InsertNextPoint(point.tolist())
vertices.InsertNextCell(1)
vertices.InsertCellPoint(id)
colors.InsertNextTuple(color)
point = vtk.vtkPolyData()
# Set the points and vertices we created as the geometry and topology of the polydata
point.SetPoints(points)
point.SetVerts(vertices)
point.GetPointData().SetScalars(colors)
polygonMapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
            polygonMapper.SetInputConnection(point.GetProducerPort())
else:
polygonMapper.SetInputData(point)
polygonMapper.Update()
polygonActor = vtk.vtkActor()
polygonActor.SetMapper(polygonMapper)
polygonActor.GetProperty().SetPointSize(point_size)
self.ren.AddActor(polygonActor)
self.actor_list[name] = polygonActor
def render(self):
self.iren.Render()
if self.qtwidget_mode == False:
self.iren.Start()
if __name__ == "__main__":
window_width = 1.18
window_height = 0.75
window_points = [[-window_width/2, -window_height*math.cos((5.0/180.0) * math.pi), -window_height*math.sin((5.0/180.0) * math.pi)],
[ window_width/2, -window_height*math.cos((5.0/180.0) * math.pi), -window_height*math.sin((5.0/180.0) * math.pi)],
[-window_width/2, 0, 0],
[ window_width/2, 0, 0]]
index = np.array([0,1,3,2,0])
ren = vtkRenderer()
    # addLines expects a name plus an (n, 3) array of points; render() shows the window.
    ren.addLines("window", np.array(window_points), index)
    ren.render()
|
from setuptools import setup, find_packages
import os
# User-friendly description from README.md
current_directory = os.path.dirname(os.path.abspath(__file__))
package_name = os.path.basename(current_directory)
try:
with open(os.path.join(current_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
except Exception:
long_description = ''
setup(
# Name of the package
name=package_name,
# Packages to include into the distribution
    packages=find_packages(),
# Start with a small number and increase it with
# every change you make https://semver.org
version='0.0.1',
    # Choose a license from here:
    # https://help.github.com/articles/licensing-a-repository
    # For example: MIT
license='MIT',
# Short description of your library
description='A package to scrape financial data using tickers from the QuickFS website',
# Long description of your library
long_description=long_description,
long_description_content_type='text/markdown',
# Your name
author='Diego Heer',
# Your email
author_email='diegojonathanheer@gmail.com',
# Either the link to your github or to your website
    url='https://github.com/DiegoHeer',
# List of keywords
keywords=['Stocks', 'Financial Analysis', 'Rule #1'],
# List of packages to install with this one
install_requires=[],
# https://pypi.org/classifiers/
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
zip_safe=False
)
|
from datetime import datetime, timedelta
from bisect import bisect_left
import numpy.ma as ma
from cdippy.cdippy import CDIPnc, Archive, Realtime, RealtimeXY, Historic
import cdippy.timestamp_utils as tsu
import cdippy.utils as cu
class StnData(CDIPnc):
"""
Returns data and metadata for the specified station.
    This class handles the problem that neither the Realtime
    nor the Historic .nc file is guaranteed to exist for either data or metadata,
    and the number of deployment files is unknown a priori.
It tries to seam the multiple station files together.
"""
max_deployments = 99 # Checks at most this number of deployment nc files
# Commonly requested sets of variables
parameter_vars = ['waveHs', 'waveTp', 'waveDp', 'waveTa']
xyz_vars = ['xyzXDisplacement', 'xyzYDisplacement', 'xyzZDisplacement']
spectrum_vars = [
'waveEnergyDensity', 'waveMeanDirection',
'waveA1Value', 'waveB1Value', 'waveA2Value', 'waveB2Value',
'waveCheckFactor',]
meta_vars = [
'metaStationName',
'metaDeployLatitude', 'metaDeployLongitude', 'metaWaterDepth',
'metaDeclilnation']
meta_attributes = [
'wmo_id',
'geospatial_lat_min', 'geospatial_lat_max', 'geospatial_lat_units', 'geospatial_lat_resolution',
'geospatial_lon_min', 'geospatial_lon_max', 'geospatial_lon_units', 'geospatial_lon_resolution',
'geospatial_vertical_min', 'geospatial_vertical_max', 'geospatial_vertical_units', 'geospatial_vertical_resolution',
'time_coverage_start', 'time_coverage_end',
'date_created', 'date_modified' ]
def __init__(cls, stn, data_dir=None, org=None):
cls.nc = None
cls.stn = stn
cls.data_dir = data_dir
cls.org = org
cls.historic = Historic(cls.stn, cls.data_dir, cls.org)
cls.realtime = Realtime(cls.stn, cls.data_dir, cls.org)
if cls.historic and cls.historic.nc :
cls.meta = cls.historic
else:
if cls.realtime and cls.realtime.nc :
cls.meta = cls.realtime
else:
return None
def get_parameters(cls, start=None, end=None, pub_set='public', apply_mask=True, target_records=0):
return cls.get_series(start, end, cls.parameter_vars, pub_set, apply_mask, target_records)
def get_stn_meta(cls):
""" Returns a dict of station meta data using historic or realtime file. """
result = {}
if cls.meta is None:
return result
cls.meta.set_request_info(vrs=cls.meta_vars)
result = cls.meta.get_request()
for attr_name in cls.meta_attributes:
if hasattr(cls.meta.nc, attr_name):
result[attr_name] = getattr(cls.meta.nc, attr_name)
return result
def get_xyz(cls, start=None, end=None, pub_set='public'):
return cls.get_series(start, end, cls.xyz_vars, pub_set)
def get_spectra(cls, start=None, end=None, pub_set='public', apply_mask=True, target_records=0):
return cls.get_series(start, end, cls.spectrum_vars, pub_set, apply_mask, target_records)
def get_series(cls, start=None, end=None, vrs=None, pub_set='public', apply_mask=True, target_records=0):
"""
Returns a dict of data between start and end dates with specified quality.
Use this to get series that may span realtime and historic files.
If end is None, then start is considered a target date.
"""
if vrs is None:
vrs = cls.parameter_vars
prefix = cls.get_var_prefix(vrs[0])
if start is not None and end is None: # Target time
ts_I = cls.get_target_timespan(cu.datetime_to_timestamp(start), target_records, prefix+'Time')
if ts_I[0] is not None:
start = cu.timestamp_to_datetime(ts_I[0])
end = cu.timestamp_to_datetime(ts_I[1])
else:
return None
elif start is None: # Use default 3 days back
start = datetime.utcnow()-timedelta(days=3)
end = datetime.utcnow()
cls.set_request_info(start, end, vrs, pub_set, apply_mask)
if vrs is not None and prefix == 'xyz':
return cls.merge_xyz_request()
else:
return cls.merge_request()
def aggregate_dicts(cls, dict1, dict2):
""" Aggregate the data in two dictionaries. Dict1 has oldest data. """
#- Union the keys to make sure we check each one
ukeys = set(dict1.keys()) | set(dict2.keys())
result = { }
#- Combine the variables
for key in ukeys :
if key in dict2 and key in dict1:
result[key] = ma.concatenate([dict1[key], dict2[key]])
elif key in dict2:
result[key] = dict2[key]
else:
result[key] = dict1[key]
return result
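    # Sketch of aggregate_dicts behaviour (arrays are illustrative): given
    #     dict1 = {'waveHs': ma.array([1.0, 1.1])}        # older (historic) data
    #     dict2 = {'waveHs': ma.array([1.2]), 'waveTp': ma.array([9.0])}
    # the result concatenates shared keys oldest-first and keeps unique keys:
    #     {'waveHs': ma.array([1.0, 1.1, 1.2]), 'waveTp': ma.array([9.0])}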
def merge_xyz_request(cls):
""" Merge xyz data from realtime and archive nc files. """
if cls.vrs and cls.vrs[0] == 'xyzData':
cls.vrs = ['xyzXDisplacement','xyzYDisplacement','xyzZDisplacement']
request_timespan = cu.Timespan(cls.start_stamp, cls.end_stamp)
result = {}
def helper(cdip_nc, request_timespan, result):
# Try the next file if it is without xyz data
z = cdip_nc.get_var('xyzZDisplacement')
if z is None:
return result, cls.start_stamp
# Try the next file if start_stamp cannot be calculated
start_stamp = cdip_nc.get_xyz_timestamp(0)
end_stamp = cdip_nc.get_xyz_timestamp(len(z)-1)
if start_stamp is None:
return result, cls.start_stamp
file_timespan = cu.Timespan(start_stamp, end_stamp)
# Add data if request timespan overlaps data timespan
if request_timespan.overlap(file_timespan):
cdip_nc.start_stamp = cls.start_stamp
cdip_nc.end_stamp = cls.end_stamp
cdip_nc.pub_set = cls.pub_set
cdip_nc.apply_mask = cls.apply_mask
cdip_nc.vrs = cls.vrs
tmp_result = cdip_nc.get_request()
result = cls.aggregate_dicts(result, tmp_result)
return result, start_stamp
# First get realtime data if it exists
rt = RealtimeXY(cls.stn)
if rt.nc is not None:
result, start_stamp = helper(rt, request_timespan, result)
# If the request start time is more recent than the realtime
# start time, no need to look in the archives
if cls.start_stamp > start_stamp:
return result
# Second, look in archive files for data
for dep in range(1, cls.max_deployments):
deployment = 'd'+'{:02d}'.format(dep)
ar = Archive(cls.stn, deployment, cls.data_dir, cls.org)
if ar.nc is None:
break
result, start_stamp = helper(ar, request_timespan, result)
# Break if file start stamp is greater than request end stamp
if start_stamp > cls.end_stamp :
break
return result
def merge_request(cls):
""" Returns data for given request across realtime and historic files """
        rt = {}
r = cls.realtime
# Note that we are assuming that waveTime will work for every time dim.
if r.nc is not None and r.get_var('waveTime')[0] <= cls.end_stamp:
r.vrs = cls.vrs
r.start_stamp = cls.start_stamp
r.end_stamp = cls.end_stamp
r.pub_set = cls.pub_set
r.apply_mask = cls.apply_mask
rt = r.get_request()
        ht = {}
h = cls.historic
if h.nc is not None and h.get_var('waveTime')[-1] >= cls.start_stamp:
h.vrs = cls.vrs
h.start_stamp = cls.start_stamp
h.end_stamp = cls.end_stamp
h.pub_set = cls.pub_set
h.apply_mask = cls.apply_mask
ht = h.get_request()
return cls.aggregate_dicts(ht, rt)
def get_nc_files(cls, types=['realtime','historic','archive']):
""" Returns dict of netcdf4 objects of a station's netcdf files """
result = {}
for type in types:
if type == 'realtime':
rt = Realtime(cls.stn, cls.data_dir, cls.org)
if rt.nc:
result[rt.filename] = rt.nc
if type == 'historic':
ht = Historic(cls.stn, cls.data_dir, cls.org)
if ht.nc:
result[ht.filename] = ht.nc
if type == 'archive':
for dep in range(1,cls.max_deployments):
deployment = 'd'+'{:02d}'.format(dep)
ar = Archive(cls.stn, deployment, cls.data_dir, cls.org)
if ar.nc is None:
break
result[ar.filename] = ar
return result
def get_target_timespan(cls, target_timestamp, n, time_var):
"""
Returns a 2-tuple of timestamps, an interval corresponding to n records to
the right or left of target_timestamp.
Given a time_var (e.g. 'waveTime') and target timestamp, returns a 2-tuple
of timestamps corresponding to i and i+n (n<0 or n>=0) taken from
the realtime and historic nc files. Those timestamps can then be used in
set_request_info().
"""
r_ok = False
if cls.realtime.nc is not None:
r_ok = True
h_ok = False
if cls.historic.nc is not None:
h_ok = True
# Check realtime to find closest index
r_closest_idx = None
if r_ok:
r_stamps = cls.realtime.get_var(time_var)[:]
r_last_idx = len(r_stamps) - 1
i_b = bisect_left(r_stamps, target_timestamp)
# i_b may be one past the last index
i_b = min(i_b, r_last_idx)
# Target timestamp is exactly equal to a data time
if i_b == r_last_idx or r_stamps[i_b] == target_timestamp:
r_closest_idx = i_b
elif i_b > 0:
r_closest_idx = tsu.get_closest_index(i_b-1, i_b, r_stamps, target_timestamp)
# If closest index not found, check historic
h_closest_idx = None
h_last_idx = None  # Lets us know whether h_stamps has been loaded
if h_ok and not r_closest_idx:
h_stamps = cls.historic.get_var(time_var)[:]
h_last_idx = len(h_stamps) - 1
i_b = bisect_left(h_stamps, target_timestamp)
i_b = min(i_b, h_last_idx)
# Target timestamp is exactly equal to a data time
if (i_b <= h_last_idx and h_stamps[i_b] == target_timestamp) or i_b == 0:
h_closest_idx = i_b
elif i_b >= h_last_idx: # Target is between the two files
if r_ok:
if abs(h_stamps[h_last_idx]-target_timestamp) < abs(r_stamps[0]-target_timestamp):
h_closest_idx = i_b
else:
r_closest_idx = 0
else: # No realtime file
h_closest_idx = i_b
else: # Within middle of historic stamps
h_closest_idx = tsu.get_closest_index(i_b-1, i_b, h_stamps, target_timestamp)
# Now we have the closest index, find the intervals
if r_closest_idx is not None:
r_interval = tsu.get_interval(r_stamps, r_closest_idx, n)
# If bound exceeded toward H and H exists, calculate h_interval
if r_interval[2] < 0 and h_ok:
if not h_last_idx:
h_stamps = cls.historic.get_var(time_var)[:]
h_last_idx = len(h_stamps) - 1
h_interval = tsu.get_interval(h_stamps, h_last_idx, n+r_closest_idx+1)
#print("Rx H interval: ", h_interval)
#print("Rx R interval: ", r_interval)
return tsu.combine_intervals(h_interval, r_interval)
else:
return r_interval
elif h_closest_idx is not None:
h_interval = tsu.get_interval(h_stamps, h_closest_idx, n)
# If bound exceeded toward R and R exists, calculate r_interval
if h_interval[2] > 0 and r_ok:
r_interval = tsu.get_interval(r_stamps, 0, n+h_closest_idx-h_last_idx-1)
#print("Hx H interval: ", h_interval)
#print("Hx R interval: ", r_interval)
return tsu.combine_intervals(h_interval, r_interval)
else:
return h_interval
# If we get to here there's a problem
return (None, None, None)
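# A minimal usage sketch (assumes local CDIP nc files for station 100p1; the
# exact set_request_info() signature is not shown here, so that call is
# illustrative only):
#   s = StnData('100p1')
#   start, end, flag = s.get_target_timespan(1470009600, 5, 'waveTime')
#   if start is not None:
#       s.set_request_info(start, end)  # hypothetical follow-up call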
if __name__ == "__main__":
#- Tests
def t0():
s = StnData('100p1')
d = s.get_stn_meta()
print(d)
def t1():
s = StnData('100p1')
d = s.get_spectra(datetime(2016,8,1), target_records=3)
print(d.keys())
print(d['waveEnergyDensity'].shape)
def t2():
s = StnData('100p1',org='ww3')
d = s.get_series('2016-08-01 00:00:00','2016-08-02 23:59:59',['waveHs'],'public')
print(d)
def t3():
s = StnData('100p1',data_dir='./gdata')
d = s.get_nc_files(['historic','archive','realtime'])
print(d.keys())
def t4():
s = StnData('100p1')
# Across deployments 5 and 6
d = s.get_series('2007-05-30 00:00:00','2007-06-01 23:59:59',['xyzData'],'public')
print(len(d['xyzXDisplacement']))
print(len(d['xyzTime']))
print(d['xyzTime'][0],d['xyzTime'][-1])
def t5():
s = StnData('100p1')
dt = datetime(2010,4,1,0,0)
d = s.get_series(dt, target_records=-4)
print(d)
def t6():
# Mark 1 filter delay set to -999.9
s = StnData('071p1')
end = datetime.utcnow()
end = datetime(1996,1,22,15,57,00)
start = end - timedelta(hours=2)
d = s.get_xyz(start, end)
print("D: "+repr(d))
print("Len: "+repr(len(d['xyzTime'])))
t6()
|
""" Local routines
Written by S.Haesaert
CONTENT
Helpful functions for the JPL project
Bridging Tulip with the Statechart autocoder
DATE 2 June
"""
# TODO: Check whether the output set of reduced Mealy machines (i.e., ctrl.outputs) is too big
from __future__ import absolute_import
from __future__ import print_function
import logging
from itertools import product as it_product
from networkx.algorithms.minors import equivalence_classes
from tulip import transys
from Interface import synth2 as synth
logger = logging.getLogger(__name__)
def remove_aux_inputs(ctrl, inputs):
#1. check whether you are allowed to remove the aux inputs. <= not done
#2. remove aux. inputs.
ctrl_new = transys.MealyMachine()
ctrl_new.add_outputs(ctrl.outputs)
# this needs to be changed to be a limited set
inputs_dict = dict()
for i in inputs:
inputs_dict[i] = ctrl.inputs[i]
ctrl_new.add_inputs(inputs_dict)
# add nodes from original mealy
ctrl_new.add_nodes_from(ctrl.nodes())
block_pairs = it_product(ctrl, ctrl)
for (b, c) in block_pairs:
labels = {frozenset([(key, label[key]) for key in ctrl_new.inputs.keys()]
+ [(output, label[output]) for output in ctrl_new.outputs.keys()])
for (x, y, label) in ctrl.transitions.find(b, c)}
for q in labels:
ctrl_new.transitions.add(b, c, **dict(q))
ctrl_new.states.initial.add_from(ctrl.states.initial)
return ctrl_new
def reduce_mealy(ctrl, outputs={'ctrl'}, relabel=False, prune_set=None,
full=True, combine_trans=False, verbose=True):
""" reduce mealy machines by computing the quotient system of the maximal equivalence class
Parameters
----------
ctrl: mealy machine
outputs : Tells which outputs are critical and should be kept. Given as a set of strings.
relabel : True/False = Relabels nodes (especially needed when ctrl comes with hash like names)
prune_set : if given => try to 'prune' => remove all transitions that do not belong to the set of allowed initialisations;
else determinize
"""
assert isinstance(prune_set, set) or prune_set is None, 'prune_set is not a set'
ctrl_s = prune_init(ctrl, init_event=prune_set)
if verbose: print('Original number of states = ' + str(len(ctrl)) + '\n'
+ ' number of transitions = ' + str(len(ctrl.transitions.find())))
it_beh = True
len_last = len(ctrl_s)
while it_beh:
equiv_classes = equiv_alpha(ctrl_s, outputs)
if verbose: print('Start iterating for maximally coarse bisimulation')
it = True
# now you should iterate for maximally coarse
while it:
if verbose: print('Number of states = ' + str(len(equiv_classes)))
equiv_classes_new = iterate_equiv(equiv_classes, ctrl_s, outputs=outputs)
it = (len(equiv_classes_new) != len(equiv_classes))
equiv_classes = equiv_classes_new
if verbose: print('Found equivalence classes')
# now compute quotient system
equiv_dict = dict(sum([list(it_product(block, {i})) for (i, block) in enumerate(equiv_classes)], []))
node_rel = lambda u, v: equiv_dict[u] == equiv_dict[v] # the initial relation
ctrl_s = quotient_mealy(ctrl_s, node_relation=node_rel, relabel=relabel, outputs=outputs)
if full:
equiv_classes = reduce_guar_beh(ctrl_s, outputs=outputs)
equiv_dict = dict(sum([list(it_product(block, {i})) for (i, block) in enumerate(equiv_classes)], []))
node_rel = lambda u, v: equiv_dict[u] == equiv_dict[v] # the initial relation => groups of nodes that can
# have equal next nodes
ctrl_s = quotient_mealy(ctrl_s, node_relation=node_rel, relabel=relabel, outputs=outputs)
if verbose: print('Behavioural equivalence reductions \n' +
'- number of states = ' + str(len(ctrl_s)) + '\n'
+ '- number of transitions = ' + str(len(ctrl_s.transitions.find())))
it_beh = ((len(ctrl_s) != len_last) and full)
len_last = len(ctrl_s)
if combine_trans:
ctrl_s = combine_transitions(ctrl_s)
if verbose: print('Combine transitions \n' +
'- number of states = ' + str(len(ctrl_s)) + '\n'
+ '- number of transitions = ' + str(len(ctrl_s.transitions.find())))
return ctrl_s
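# A minimal usage sketch (assumes `ctrl` is a tulip MealyMachine produced by
# synthesis, with an output variable named 'loc'):
#   ctrl_small = reduce_mealy(ctrl, outputs={'loc'}, relabel=True,
#                             prune_set=None, full=True, verbose=False)
#   save_png(ctrl_small, name='reduced_ctrl')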
def reduce_guar_beh(ctrl, outputs={'loc'}):
"""
Compute equivalence classes.
Parameters
----------
ctrl : mealy machine
outputs : Tells which outputs are critical and should be kept. Given as a set of strings.
Code is adapted from networkx.algorithms.minors.equivalence_classes by Jeffrey Finkelstein.
"""
ctrl_n = ctrl.copy()
# 1. Find R_0 = equivalence class of elements with the same labels on their outgoing transitions.
blocks = []
# Determine the equivalence class for each element of the iterable.
# TODO Order first :
# => Dont go directly over ctrl.states(), first order them on the number of transitions they have.
stat_len = [(y, len(ctrl_n.transitions.find(y))) for y in ctrl_n.states()]
sorted_nodes = sorted(stat_len, key=lambda stat_len: -stat_len[1])
for (y,_t) in sorted_nodes:
# Each element y must be in *exactly one* equivalence class.
#
# Each block is guaranteed to be non-empty
if y == 'Sinit': # the initial state gets its own block
blocks.append([y])
continue
for block in blocks:
x = next(iter(block))
if len(ctrl[x]) < len(ctrl[y]):
#print('unequal number')
continue
if x == 'Sinit': # the initial state gets its own block
continue
# compute set of labels:
labels_x = {frozenset([(key, label[key]) for key in ctrl_n.inputs.keys()]
+ [(output, label[output]) for output in outputs]+[('node',_y)])
for (_x, _y, label) in ctrl_n.transitions.find({x})}
labels_y = {frozenset([(key, label[key]) for key in ctrl_n.inputs.keys()]
+ [(output, label[output]) for output in outputs]+[('node',_y)])
for (_x, _y, label) in ctrl_n.transitions.find({y})}
if labels_y <= labels_x:
block.append(y)
break
labelin_x = {frozenset([(key, label[key]) for key in ctrl_n.inputs.keys()])
for (_x, _y, label) in ctrl_n.transitions.find({x})}
labelin_y = {frozenset([(key, label[key]) for key in ctrl_n.inputs.keys()])
for (_x, _y, label) in ctrl_n.transitions.find({y})}
if len(labels_y | labels_x) == len(labelin_y | labelin_x):
block.append(y)  # TODO (THIS IS WRONG: the labels are now no longer correct!
# After adding a new state to a block, the first state of the block needs to get
# the additional outgoing transitions.)
# You need to add the additional outgoing transitions immediately, otherwise you are creating errors.
# find the missing input labels
for label in labels_y.difference(labels_x):
ldict=dict(label)
ctrl_n.transitions.add(x, ldict.pop('node'), **ldict)
ctrl_n.transitions.find(x, **ldict)
# labels = {frozenset([(key, label[key]) for key in mealy.inputs.keys()]
# + [(output, label[output]) for output in outputs])
# for (x, y, label) in mealy.transitions.find(b, c)}
# for q in labels:
# q_mealy.transitions.add(mapping[b], mapping[c], **dict(q))
break
else:
# If the element y is not part of any known equivalence class, it
# must be in its own, so we create a new singleton equivalence
# class for it.
blocks.append([y])
return {frozenset(block) for block in blocks}
def combine_transitions(ctrl):
""" Combine parallell transitions when they are independent of environment actions
Parameters
----------
ctrl: mealy machine
"""
ctrl_copy = ctrl.copy()
for c_state in ctrl_copy.nodes():
for post_s in ctrl_copy.states.post(c_state):
logger.info('(' + str(c_state) + ')' + '(' + str(post_s) + ')')
labels = [set(label.items()) for (x, y, label) in ctrl_copy.transitions.find({c_state}, {post_s})]
min_set = set.intersection(*labels)
labels_mins = [lab - min_set for lab in labels]
if set.union(*labels_mins) == set():
continue
list_in = [set(it_product({key}, values)) for (key, values) in ctrl_copy.inputs.items()
if (not values == {0, 1}) & (set(it_product({key}, values)) <= set.union(*labels_mins))] + [
set(it_product({key}, {True, False})) for (key, values) in ctrl_copy.inputs.items()
if ((values == {0, 1}) & (set(it_product({key}, values)) <= set.union(*labels_mins)))]
labels_updated = labels.copy()
for list_el in list_in:
for label in labels_updated:
label_gen = [(label - list_el) | {el_set} for el_set in list_el]
if all([any([label_gen_el == labels_el for labels_el in labels_updated]) for label_gen_el in
label_gen]):
labels_updated = set(frozenset(labels_el) for labels_el in labels_updated if
not any([label_gen_el == labels_el for label_gen_el in label_gen]))
labels_updated |= {frozenset((label - list_el))}
ctrl_copy.transitions.remove_from(ctrl_copy.transitions.find({c_state}, {post_s}))
for labels_updated_el in labels_updated:
ctrl_copy.transitions.add(c_state, post_s, dict(set(labels_updated_el)))
return ctrl_copy
def equiv_alpha(ctrl, outputs={'loc'}):
"""
compute equivalence classes.
Parameters
----------
ctrl : mealy machine
outputs : Tells which outputs are critical and should be kept. Given as a set of strings.
Code is adapted from networkx.algorithms.minors.equivalence_classes by Jeffrey Finkelstein.
"""
# 1. Find R_0 = equivalence class of elements with the same labels on their outgoing transitions.
blocks = []
# Determine the equivalence class for each element of the iterable.
for y in ctrl.states():
# Each element y must be in *exactly one* equivalence class.
#
# Each block is guaranteed to be non-empty
for block in blocks:
x = next(iter(block))
if len(ctrl[x]) != len(ctrl[y]):
# print('unequal number')
continue
# compute set of labels:
labels_x = {frozenset([(key, label[key]) for key in ctrl.inputs.keys()]
+ [(output, label[output]) for output in outputs])
for (_x, _y, label) in ctrl.transitions.find({x})}
labels_y = {frozenset([(key, label[key]) for key in ctrl.inputs.keys()]
+ [(output, label[output]) for output in outputs])
for (_x, _y, label) in ctrl.transitions.find({y})}
if labels_x == labels_y:
block.append(y)
break
else:
# If the element y is not part of any known equivalence class, it
# must be in its own, so we create a new singleton equivalence
# class for it.
blocks.append([y])
return {frozenset(block) for block in blocks}
def iterate_equiv(q_blocks, ctrl, outputs={'loc'}):
""" Iterate the equivalence classes
Parameters
----------
q_blocks : equivalence classes
ctrl : mealy machine
outputs : Tells which outputs are critical and should be kept. Given as a set of strings.
"""
dict__r = dict(sum([list(it_product(block, {i})) for (i, block) in enumerate(q_blocks)], []))
blocks = []
# Determine the equivalence class for each element of the iterable.
for y in ctrl.states():
# Each element y must be in *exactly one* equivalence class.
#
# Each block is guaranteed to be non-empty
if y in ctrl.states.initial:
blocks.append([y])  # We don't want to group the initial state, because that will cause issues with the autocoding.
else:
for block in blocks:
x = next(iter(block))
if len(ctrl[x]) != len(ctrl[y]):
# print('unequal number')
continue
# compute set of labels:
labels_x = {frozenset([(key, label[key]) for key in ctrl.inputs.keys()] +
[(output, label[output]) for output in outputs] +
[('Relx', dict__r[_x])]+[('Rely', dict__r[_y])])
for (_x, _y, label) in ctrl.transitions.find({x})}
labels_y = {frozenset([(key, label[key]) for key in ctrl.inputs.keys()] +
[(output, label[output]) for output in outputs] +
[('Relx', dict__r[_x])]+[('Rely', dict__r[_y])])
for (_x, _y, label) in ctrl.transitions.find({y})}
if labels_x == labels_y:
block.append(y)
break
else:
# If the element y is not part of any known equivalence class, it
# must be in its own, so we create a new singleton equivalence
# class for it.
blocks.append([y])
return {frozenset(block) for block in blocks}
def prune_init(ctrl,init_event=None):
ctrl_s = synth.determinize_machine_init(ctrl)
if init_event is not None:
try:
keys = list(set(key for (key,val) in list(init_event)))
inputsb = {env_var: ctrl.inputs[env_var] for env_var in keys}
# this allows you to give a subset of the inputs
set_in = set.union(*[set(it_product({key}, values)) for (key, values) in inputsb.items()
if not values == {0, 1}] + [
set(it_product({key}, {True, False})) for (key, values) in inputsb.items()
if values == {0, 1}])
if not init_event <= set_in:
raise ValueError('The set of initial environment values does not'
' belong to the set of inputs of the mealy machine')
for s, to, label in ctrl_s.transitions.find({'Sinit'}):
if not (set.intersection(set(label.items()), set_in)) <= init_event:
ctrl_s.transitions.remove(s, to, attr_dict=label)
if ctrl_s['Sinit'] is None:
raise ValueError('The set of initial environment values does not'
' belong to the set of inputs of the mealy machine.\n'
' All initial transitions were removed.')
except ValueError as inst:
print(inst.args)
print('Determinized Mealy machine,'
' initial transitions have not been pruned.(WARNING)')
return synth.determinize_machine_init(ctrl)
return ctrl_s
def quotient_mealy(mealy, node_relation=None, relabel=False, outputs={'loc'}):
"""Returns the quotient graph of ``G`` under the specified equivalence
relation on nodes.
Parameters
----------
mealy : Mealy machine
The machine for which to return the quotient machine with the specified node
relation.
node_relation : Boolean function with two arguments
This function must represent an equivalence relation on the nodes of
``mealy``. It must take two arguments *u* and *v* and return ``True``
exactly when *u* and *v* are in the same equivalence class. The
equivalence classes form the nodes in the returned machine.
Unlike the original networkx.quotient_graph, self-loops are maintained.
relabel : Boolean
if true relabel nodes in the graph
outputs : Tells which outputs are critical and should be kept. Given as a set of strings.
"""
if node_relation is None:
node_relation = lambda u, v: mealy.states.post(u) == mealy.states.post(v)
q_mealy = transys.MealyMachine()
q_mealy.add_inputs(mealy.inputs)
q_mealy.add_outputs(mealy.outputs)
# Compute the blocks of the partition on the nodes of G induced by the
# equivalence relation R.
if relabel:
mapping = dict((n, i) for (i, n) in enumerate(equivalence_classes(mealy, node_relation)))
for (n, i) in mapping.items():
if {'Sinit'} <= set(n):
mapping[n] = 'Sinit'
q_mealy.add_nodes_from({n for (i, n) in mapping.items()})
else:
q_mealy.add_nodes_from(equivalence_classes(mealy, node_relation))
if relabel:
block_pairs = it_product(mapping.keys(), mapping.keys())
for (b, c) in block_pairs:
labels = {frozenset([(key, label[key]) for key in mealy.inputs.keys()]
+ [(output, label[output]) for output in outputs])
for (x, y, label) in mealy.transitions.find(b, c)}
for q in labels:
q_mealy.transitions.add(mapping[b], mapping[c], **dict(q))
else:
block_pairs = it_product(q_mealy, q_mealy)
for (b, c) in block_pairs:
labels = {frozenset([(key, label[key]) for key in mealy.inputs.keys()]
+ [(output, label[output]) for output in outputs])
for (x, y, label) in mealy.transitions.find(b, c)}
for q in labels:
q_mealy.transitions.add(b, c, **dict(q))
if relabel:
for node_eq in mapping.keys():
if any(init in node_eq for init in mealy.states.initial):
q_mealy.states.initial.add(mapping[node_eq])
else: # only initializing after relabel
for node_eq in q_mealy.nodes():
if any(init in node_eq for init in mealy.states.initial):
q_mealy.states.initial.add(node_eq)
return q_mealy
def save_png(ctrl,name='untitled'):
from tulip.transys.export import graph2dot
pydot_ctrl = graph2dot._graph2pydot(ctrl)
pydot_ctrl.set_rankdir('TB')
# pydot_ctrl.set_splines('polyline')
pydot_ctrl.set_bgcolor('white')
pydot_ctrl.set_nodesep(.4)
pydot_ctrl.set_ranksep(.4)
pydot_ctrl.set_size('"40,30"')
pydot_ctrl.set_concentrate('False')
#png_str = pydot_ctrl.create_jpeg(prog='dot')
pydot_ctrl.write_png(name+'.png',prog='dot')
return
|
import _plotly_utils.basevalidators
class ValueValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="value",
parent_name="scatterpolar.marker.colorbar.tickformatstop",
**kwargs
):
super(ValueValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs
)
|
import time
from twilio import jwt
class IpMessagingGrant(object):
""" Grant to access Twilio IP Messaging """
def __init__(self, service_sid=None, endpoint_id=None,
deployment_role_sid=None, push_credential_sid=None):
self.service_sid = service_sid
self.endpoint_id = endpoint_id
self.deployment_role_sid = deployment_role_sid
self.push_credential_sid = push_credential_sid
@property
def key(self):
return "ip_messaging"
def to_payload(self):
grant = {}
if self.service_sid:
grant['service_sid'] = self.service_sid
if self.endpoint_id:
grant['endpoint_id'] = self.endpoint_id
if self.deployment_role_sid:
grant['deployment_role_sid'] = self.deployment_role_sid
if self.push_credential_sid:
grant['push_credential_sid'] = self.push_credential_sid
return grant
class ConversationsGrant(object):
""" Grant to access Twilio Conversations """
def __init__(self, configuration_profile_sid=None):
self.configuration_profile_sid = configuration_profile_sid
@property
def key(self):
return "rtc"
def to_payload(self):
grant = {}
if self.configuration_profile_sid:
grant['configuration_profile_sid'] = self.configuration_profile_sid
return grant
class AccessToken(object):
""" Access Token used to access Twilio Resources """
def __init__(self, account_sid, signing_key_sid, secret,
identity=None, ttl=3600, nbf=None):
self.account_sid = account_sid
self.signing_key_sid = signing_key_sid
self.secret = secret
self.identity = identity
self.ttl = ttl
self.nbf = nbf
self.grants = []
def add_grant(self, grant):
self.grants.append(grant)
def to_jwt(self, algorithm='HS256'):
now = int(time.time())
headers = {
"typ": "JWT",
"cty": "twilio-fpa;v=1"
}
grants = {}
if self.identity:
grants["identity"] = self.identity
for grant in self.grants:
grants[grant.key] = grant.to_payload()
payload = {
"jti": '{0}-{1}'.format(self.signing_key_sid, now),
"iss": self.signing_key_sid,
"sub": self.account_sid,
"exp": now + self.ttl,
"grants": grants
}
if self.nbf is not None:
payload['nbf'] = self.nbf
return jwt.encode(payload, self.secret, headers=headers,
algorithm=algorithm)
def __str__(self):
return self.to_jwt()
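# A minimal usage sketch; the SIDs and secret below are hypothetical
# placeholders, not real credentials.
if __name__ == '__main__':
    token = AccessToken('ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
                        'SKxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx',
                        'your_api_secret',
                        identity='alice', ttl=3600)
    token.add_grant(IpMessagingGrant(service_sid='ISxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'))
    token.add_grant(ConversationsGrant(configuration_profile_sid='VSxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'))
    print(token.to_jwt())  # prints the signed JWT string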
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
Websocket proxy that is compatible with OpenStack Nova.
Leverages websockify.py by Joel Martin
'''
import socket
import sys
from oslo_log import log as logging
from six.moves import http_cookies as Cookie
import six.moves.urllib.parse as urlparse
import websockify
import nova.conf
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova import exception
from nova.i18n import _
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class NovaProxyRequestHandlerBase(object):
def address_string(self):
# NOTE(rpodolyaka): override the superclass implementation here and
# explicitly disable the reverse DNS lookup, which might fail on some
# deployments due to DNS configuration and break VNC access completely
return str(self.client_address[0])
def verify_origin_proto(self, connection_info, origin_proto):
access_url = connection_info.get('access_url')
if not access_url:
detail = _("No access_url in connection_info. "
"Cannot validate protocol")
raise exception.ValidationError(detail=detail)
expected_protos = [urlparse.urlparse(access_url).scheme]
# NOTE: For serial consoles the expected protocol could be ws or
# wss which correspond to http and https respectively in terms of
# security.
if 'ws' in expected_protos:
expected_protos.append('http')
if 'wss' in expected_protos:
expected_protos.append('https')
return origin_proto in expected_protos
def new_websocket_client(self):
"""Called after a new WebSocket connection has been established."""
# Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad
from eventlet import hubs
hubs.use_hub()
# Nova expects the token to be passed in the query
# string of the GET request
parse = urlparse.urlparse(self.path)
if parse.scheme not in ('http', 'https'):
# Due to a bug in urlparse in Python < 2.7.4 we cannot support
# special schemes (cf: http://bugs.python.org/issue9374)
if sys.version_info < (2, 7, 4):
raise exception.NovaException(
_("We do not support scheme '%s' under Python < 2.7.4, "
"please use http or https") % parse.scheme)
query = parse.query
token = urlparse.parse_qs(query).get("token", [""]).pop()
if not token:
# NoVNC uses its own convention of forwarding the token
# from the request to a cookie header, so we should also
# check for this behavior
hcookie = self.headers.getheader('cookie')
if hcookie:
cookie = Cookie.SimpleCookie()
cookie.load(hcookie)
if 'token' in cookie:
token = cookie['token'].value
ctxt = context.get_admin_context()
rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
connect_info = rpcapi.check_token(ctxt, token=token)
if not connect_info:
raise exception.InvalidToken(token=token)
# Verify Origin
expected_origin_hostname = self.headers.getheader('Host')
if ':' in expected_origin_hostname:
e = expected_origin_hostname
if '[' in e and ']' in e:
expected_origin_hostname = e.split(']')[0][1:]
else:
expected_origin_hostname = e.split(':')[0]
expected_origin_hostnames = CONF.console_allowed_origins
expected_origin_hostnames.append(expected_origin_hostname)
origin_url = self.headers.getheader('Origin')
# missing origin header indicates non-browser client which is OK
if origin_url is not None:
origin = urlparse.urlparse(origin_url)
origin_hostname = origin.hostname
origin_scheme = origin.scheme
if origin_hostname == '' or origin_scheme == '':
detail = _("Origin header not valid.")
raise exception.ValidationError(detail=detail)
if origin_hostname not in expected_origin_hostnames:
detail = _("Origin header does not match this host.")
raise exception.ValidationError(detail=detail)
if not self.verify_origin_proto(connect_info, origin_scheme):
detail = _("Origin header protocol does not match this host.")
raise exception.ValidationError(detail=detail)
self.msg(_('connect info: %s'), str(connect_info))
host = connect_info['host']
port = int(connect_info['port'])
# Connect to the target
self.msg(_("connecting to: %(host)s:%(port)s") % {'host': host,
'port': port})
tsock = self.socket(host, port, connect=True)
# Handshake as necessary
if connect_info.get('internal_access_path'):
tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" %
connect_info['internal_access_path'])
while True:
data = tsock.recv(4096, socket.MSG_PEEK)
if data.find("\r\n\r\n") != -1:
if data.split("\r\n")[0].find("200") == -1:
raise exception.InvalidConnectionInfo()
tsock.recv(len(data))
break
# Start proxying
try:
self.do_proxy(tsock)
except Exception:
if tsock:
tsock.shutdown(socket.SHUT_RDWR)
tsock.close()
self.vmsg(_("%(host)s:%(port)s: Target closed") %
{'host': host, 'port': port})
raise
class NovaProxyRequestHandler(NovaProxyRequestHandlerBase,
websockify.ProxyRequestHandler):
def __init__(self, *args, **kwargs):
websockify.ProxyRequestHandler.__init__(self, *args, **kwargs)
def socket(self, *args, **kwargs):
return websockify.WebSocketServer.socket(*args, **kwargs)
class NovaWebSocketProxy(websockify.WebSocketProxy):
@staticmethod
def get_logger():
return LOG
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class GetPersonalPreferencesResponse(Model):
"""Represents the PersonalPreferences for the user.
:param id: Id to be used by the cache orchestrator
:type id: str
:param favorite_lab_resource_ids: Array of favorite lab resource ids
:type favorite_lab_resource_ids: list[str]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'favorite_lab_resource_ids': {'key': 'favoriteLabResourceIds', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(GetPersonalPreferencesResponse, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.favorite_lab_resource_ids = kwargs.get('favorite_lab_resource_ids', None)
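# Illustrative construction (keyword names follow the _attribute_map above):
#   prefs = GetPersonalPreferencesResponse(id='cache-id-1',
#                                          favorite_lab_resource_ids=['labA', 'labB'])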
|
import torch
import torch.nn
from torch.nn.parallel import DistributedDataParallel as DDP
from collections import OrderedDict
import os.path as osp
import wandb
from utils.utils import DotDict
class Model:
def __init__(self, hp, net_arch, loss_f, rank=0, world_size=1):
self.hp = hp
self.device = self.hp.model.device
self.net = net_arch.to(self.device)
self.rank = rank
self.world_size = world_size
if self.device != "cpu" and self.world_size != 0:
self.net = DDP(self.net, device_ids=[self.rank])
self.input = None
self.GT = None
self.step = 0
self.epoch = -1
# init optimizer
optimizer_mode = self.hp.train.optimizer.mode
if optimizer_mode == "adam":
self.optimizer = torch.optim.Adam(
self.net.parameters(), **(self.hp.train.optimizer[optimizer_mode])
)
else:
raise Exception("%s optimizer not supported" % optimizer_mode)
# init loss
self.loss_f = loss_f
self.log = DotDict()
def feed_data(self, **data): # data's keys: input, GT
for k, v in data.items():
data[k] = v.to(self.device)
self.input = data.get("input")
self.GT = data.get("GT")
def optimize_parameters(self):
self.net.train()
self.optimizer.zero_grad()
output = self.run_network()
loss_v = self.loss_f(output, self.GT)
loss_v.backward()
self.optimizer.step()
# set log
self.log.loss_v = loss_v.item()
def inference(self):
self.net.eval()
output = self.run_network()
return output
def run_network(self):
output = self.net(self.input)
return output
def save_network(self, logger, save_file=True):
if self.rank == 0:
net = self.net.module if isinstance(self.net, DDP) else self.net
state_dict = net.state_dict()
for key, param in state_dict.items():
state_dict[key] = param.to("cpu")
if save_file:
save_filename = "%s_%d.pt" % (self.hp.log.name, self.step)
save_path = osp.join(self.hp.log.chkpt_dir, save_filename)
torch.save(state_dict, save_path)
if self.hp.log.use_wandb:
wandb.save(save_path)
if logger is not None:
logger.info("Saved network checkpoint to: %s" % save_path)
return state_dict
def load_network(self, loaded_net=None, logger=None):
add_log = False
if loaded_net is None:
add_log = True
if self.hp.load.wandb_load_path is not None:
self.hp.load.network_chkpt_path = wandb.restore(
self.hp.load.network_chkpt_path,
run_path=self.hp.load.wandb_load_path,
).name
loaded_net = torch.load(
self.hp.load.network_chkpt_path, map_location=torch.device(self.device)
)
loaded_clean_net = OrderedDict() # remove unnecessary 'module.'
for k, v in loaded_net.items():
if k.startswith("module."):
loaded_clean_net[k[7:]] = v
else:
loaded_clean_net[k] = v
self.net.load_state_dict(loaded_clean_net, strict=self.hp.load.strict_load)
if logger is not None and add_log:
logger.info("Checkpoint %s is loaded" % self.hp.load.network_chkpt_path)
def save_training_state(self, logger):
if self.rank == 0:
save_filename = "%s_%d.state" % (self.hp.log.name, self.step)
save_path = osp.join(self.hp.log.chkpt_dir, save_filename)
net_state_dict = self.save_network(None, False)
state = {
"model": net_state_dict,
"optimizer": self.optimizer.state_dict(),
"step": self.step,
"epoch": self.epoch,
}
torch.save(state, save_path)
if self.hp.log.use_wandb:
wandb.save(save_path)
if logger is not None:
logger.info("Saved training state to: %s" % save_path)
def load_training_state(self, logger):
if self.hp.load.wandb_load_path is not None:
self.hp.load.resume_state_path = wandb.restore(
self.hp.load.resume_state_path, run_path=self.hp.load.wandb_load_path
).name
resume_state = torch.load(
self.hp.load.resume_state_path, map_location=torch.device(self.device)
)
self.load_network(loaded_net=resume_state["model"], logger=logger)
self.optimizer.load_state_dict(resume_state["optimizer"])
self.step = resume_state["step"]
self.epoch = resume_state["epoch"]
if logger is not None:
logger.info(
"Resuming from training state: %s" % self.hp.load.resume_state_path
)
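# A minimal usage sketch (hp is assumed to be a DotDict-style config with the
# fields accessed above and hp.model.device == "cpu" so no DDP wrapping occurs;
# MyNet, batch_x and batch_y are hypothetical):
#   model = Model(hp, net_arch=MyNet(), loss_f=torch.nn.MSELoss())
#   model.feed_data(input=batch_x, GT=batch_y)
#   model.optimize_parameters()
#   print(model.log.loss_v)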
|
# 2. Write a code which accepts a sequence of words as input
# and prints the words in a sequence after sorting them alphabetically.
print("Enter sequence of words")
print("For example -\nMy name is Sumit\n")
words = input(">>> ")
temp = words.split(" ")
temp.sort()
sorted_string = " ".join(temp)
print("string after sorting is - \n")
print(f"{sorted_string}")
|
# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import pytest
import unittest
import numpy as np
import tensorflow as tf
from tensorforce import util
from test.unittest_base import UnittestBase
class TestPrecision(UnittestBase, unittest.TestCase):
exclude_bounded_action = True # TODO: shouldn't be necessary!
require_observe = True
def test_precision(self):
self.start_tests()
try:
util.np_dtype_mapping = dict(
bool=np.bool_, int=np.int16, long=np.int32, float=np.float32 # TODO: float16
)
util.tf_dtype_mapping = dict(
bool=tf.bool, int=tf.int16, long=tf.int32, float=tf.float32 # TODO: float16
)
self.unittest(network=dict(type='auto', internal_rnn=False)) # TODO: shouldn't be necessary!
except Exception as exc:
raise exc
self.assertTrue(expr=False)
finally:
util.np_dtype_mapping = dict(
bool=np.bool_, int=np.int32, long=np.int64, float=np.float32
)
util.tf_dtype_mapping = dict(
bool=tf.bool, int=tf.int32, long=tf.int64, float=tf.float32
)
|
# -*- coding: utf-8 -*-
import numpy as np
import pymc3 as pm
from scipy.stats import kstest
from .base_test import _Base
from .physical import ImpactParameter, QuadLimbDark
class TestPhysical(_Base):
random_seed = 19860925
def test_quad_limb_dark(self):
with self._model():
dist = QuadLimbDark("u", shape=2)
# Test random sampling
samples = dist.random(size=100)
assert np.shape(samples) == (100, 2)
logp = QuadLimbDark.dist(shape=2).logp(samples).eval().flatten()
assert np.all(np.isfinite(logp))
assert np.allclose(logp[0], logp)
trace = self._sample()
u1 = trace["u"][:, 0]
u2 = trace["u"][:, 1]
# Make sure that the physical constraints are satisfied
assert np.all(u1 + u2 < 1)
assert np.all(u1 > 0)
assert np.all(u1 + 2 * u2 > 0)
# Make sure that the qs are uniform
q1 = (u1 + u2) ** 2
q2 = 0.5 * u1 / (u1 + u2)
cdf = lambda x: np.clip(x, 0, 1) # NOQA
for q in (q1, q2):
s, p = kstest(q, cdf)
assert s < 0.05
def test_impact(self):
lower = 0.1
upper = 1.0
with self._model():
ror = pm.Uniform("ror", lower=lower, upper=upper, shape=(5, 2))
dist = ImpactParameter("b", ror=ror)
# Test random sampling
samples = dist.random(size=100)
assert np.shape(samples) == (100, 5, 2)
assert np.all((0 <= samples) & (samples <= 1 + upper))
trace = self._sample()
u = trace["ror"]
u = np.reshape(u, (len(u), -1))
cdf = lambda x: np.clip((x - lower) / (upper - lower), 0, 1) # NOQA
for i in range(u.shape[1]):
s, p = kstest(u[:, i], cdf)
assert s < 0.05
assert np.all(trace["b"] <= 1 + trace["ror"])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011,2012,2013 American Registry for Internet Numbers
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
# IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Generated Sat Aug 31 15:00:08 2013 by generateDS.py version 2.10a.
#
import sys
import getopt
import re as re_
import base64
import datetime as datetime_
etree_ = None
Verbose_import_ = False
(
XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError, exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return input_data
def gds_validate_datetime(self, input_data, node, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
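# Round-trip sketch (illustrative; assumes the fallback GeneratedsSuper defined
# above is in use; gds_format_datetime is an instance method, gds_parse_datetime
# a classmethod):
#   GeneratedsSuper().gds_format_datetime(datetime_.datetime(2013, 8, 31, 15, 0, 8))
#       -> '2013-08-31T15:00:08'
#   GeneratedsSuper.gds_parse_datetime('2013-08-31T15:00:08Z')
#       -> datetime 2013-08-31 15:00:08 with a GMT (+00:00) tzinfo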
def gds_validate_date(self, input_data, node, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'GMT')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.iteritems()))
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'ascii'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&amp;')
s1 = s1.replace('<', '&lt;')
s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
s1 = s1.replace('&', '&amp;')
s1 = s1.replace('<', '&lt;')
s1 = s1.replace('>', '&gt;')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', "&quot;")
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
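# Illustrative results of the two escaping helpers above:
#   quote_xml('a < b & c')  -> 'a &lt; b &amp; c'
#   quote_attrib('1 < 2')   -> '"1 &lt; 2"'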
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if XMLParser_import_library == XMLParser_import_lxml:
msg = '%s (element %s/line %d)' % (
msg, node.tag, node.sourceline, )
else:
msg = '%s (element %s)' % (msg, node.tag, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace, pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(outfile, level, namespace, name, pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name, base64.b64encode(self.value), self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type, self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0):
self.name = name
self.data_type = data_type
self.container = container
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class net(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, termsOfUse=None, registrationDate=None, ref=None, customerRef=None, endAddress=None, handle=None, name=None, nameservers=None, netBlocks=None, originASes=None, pocs=None, orgRef=None, comment=None, parentNetRef=None, startAddress=None, updateDate=None, version=None, anytypeobjs_=None):
self.termsOfUse = _cast(None, termsOfUse)
if isinstance(registrationDate, basestring):
initvalue_ = datetime_.datetime.strptime(registrationDate, '%Y-%m-%dT%H:%M:%S')
else:
initvalue_ = registrationDate
self.registrationDate = initvalue_
self.ref = ref
self.customerRef = customerRef
self.endAddress = endAddress
self.handle = handle
self.name = name
self.nameservers = nameservers
self.netBlocks = netBlocks
self.originASes = originASes
self.pocs = pocs
self.orgRef = orgRef
self.comment = comment
self.parentNetRef = parentNetRef
self.comment = comment
self.startAddress = startAddress
if isinstance(updateDate, basestring):
initvalue_ = datetime_.datetime.strptime(updateDate, '%Y-%m-%dT%H:%M:%S')
else:
initvalue_ = updateDate
self.updateDate = initvalue_
self.version = version
self.anytypeobjs_ = anytypeobjs_
def factory(*args_, **kwargs_):
if net.subclass:
return net.subclass(*args_, **kwargs_)
else:
return net(*args_, **kwargs_)
factory = staticmethod(factory)
def get_registrationDate(self): return self.registrationDate
def set_registrationDate(self, registrationDate): self.registrationDate = registrationDate
def get_ref(self): return self.ref
def set_ref(self, ref): self.ref = ref
def get_customerRef(self): return self.customerRef
def set_customerRef(self, customerRef): self.customerRef = customerRef
def get_endAddress(self): return self.endAddress
def set_endAddress(self, endAddress): self.endAddress = endAddress
def get_handle(self): return self.handle
def set_handle(self, handle): self.handle = handle
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_nameservers(self): return self.nameservers
def set_nameservers(self, nameservers): self.nameservers = nameservers
def get_netBlocks(self): return self.netBlocks
def set_netBlocks(self, netBlocks): self.netBlocks = netBlocks
def get_originASes(self): return self.originASes
def set_originASes(self, originASes): self.originASes = originASes
def get_pocs(self): return self.pocs
def set_pocs(self, pocs): self.pocs = pocs
def get_orgRef(self): return self.orgRef
def set_orgRef(self, orgRef): self.orgRef = orgRef
def get_comment(self): return self.comment
def set_comment(self, comment): self.comment = comment
def get_parentNetRef(self): return self.parentNetRef
def set_parentNetRef(self, parentNetRef): self.parentNetRef = parentNetRef
def get_comment(self): return self.comment
def set_comment(self, comment): self.comment = comment
def get_startAddress(self): return self.startAddress
def set_startAddress(self, startAddress): self.startAddress = startAddress
def get_updateDate(self): return self.updateDate
def set_updateDate(self, updateDate): self.updateDate = updateDate
def get_version(self): return self.version
def set_version(self, version): self.version = version
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def get_termsOfUse(self): return self.termsOfUse
def set_termsOfUse(self, termsOfUse): self.termsOfUse = termsOfUse
def hasContent_(self):
if (
self.registrationDate is not None or
self.ref is not None or
self.customerRef is not None or
self.endAddress is not None or
self.handle is not None or
self.name is not None or
self.nameservers is not None or
self.netBlocks is not None or
self.originASes is not None or
self.pocs is not None or
self.orgRef is not None or
self.comment is not None or
self.parentNetRef is not None or
self.comment is not None or
self.startAddress is not None or
self.updateDate is not None or
self.version is not None or
self.anytypeobjs_ is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='net', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='net')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='net'):
if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
outfile.write(' termsOfUse=%s' % (self.gds_format_string(quote_attrib(self.termsOfUse).encode(ExternalEncoding), input_name='termsOfUse'), ))
def exportChildren(self, outfile, level, namespace_='v1:', name_='net', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.registrationDate is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sregistrationDate>%s</%sregistrationDate>%s' % (namespace_, self.gds_format_datetime(self.registrationDate, input_name='registrationDate'), namespace_, eol_))
if self.ref is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sref>%s</%sref>%s' % (namespace_, self.gds_format_string(quote_xml(self.ref).encode(ExternalEncoding), input_name='ref'), namespace_, eol_))
if self.customerRef is not None:
self.customerRef.export(outfile, level, namespace_, name_='customerRef', pretty_print=pretty_print)
if self.endAddress is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sendAddress>%s</%sendAddress>%s' % (namespace_, self.gds_format_string(quote_xml(self.endAddress).encode(ExternalEncoding), input_name='endAddress'), namespace_, eol_))
if self.handle is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%shandle>%s</%shandle>%s' % (namespace_, self.gds_format_string(quote_xml(self.handle).encode(ExternalEncoding), input_name='handle'), namespace_, eol_))
if self.name is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sname>%s</%sname>%s' % (namespace_, self.gds_format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_, eol_))
if self.nameservers is not None:
self.nameservers.export(outfile, level, namespace_, name_='nameservers', pretty_print=pretty_print)
if self.netBlocks is not None:
self.netBlocks.export(outfile, level, namespace_, name_='netBlocks', pretty_print=pretty_print)
if self.originASes is not None:
self.originASes.export(outfile, level, namespace_, name_='originASes', pretty_print=pretty_print)
if self.pocs is not None:
self.pocs.export(outfile, level, namespace_, name_='pocs', pretty_print=pretty_print)
if self.orgRef is not None:
self.orgRef.export(outfile, level, namespace_, name_='orgRef', pretty_print=pretty_print)
if self.comment is not None:
self.comment.export(outfile, level, namespace_, name_='comment', pretty_print=pretty_print)
if self.parentNetRef is not None:
self.parentNetRef.export(outfile, level, namespace_, name_='parentNetRef', pretty_print=pretty_print)
if self.startAddress is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sstartAddress>%s</%sstartAddress>%s' % (namespace_, self.gds_format_string(quote_xml(self.startAddress).encode(ExternalEncoding), input_name='startAddress'), namespace_, eol_))
if self.updateDate is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%supdateDate>%s</%supdateDate>%s' % (namespace_, self.gds_format_datetime(self.updateDate, input_name='updateDate'), namespace_, eol_))
if self.version is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sversion>%s</%sversion>%s' % (namespace_, self.gds_format_integer(self.version, input_name='version'), namespace_, eol_))
if self.anytypeobjs_ is not None:
self.anytypeobjs_.export(outfile, level, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='net'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
showIndent(outfile, level)
outfile.write('termsOfUse="%s",\n' % (self.termsOfUse,))
def exportLiteralChildren(self, outfile, level, name_):
if self.registrationDate is not None:
showIndent(outfile, level)
outfile.write('registrationDate=model_.GeneratedsSuper.gds_parse_datetime("%s"),\n' % self.gds_format_datetime(self.registrationDate, input_name='registrationDate'))
if self.ref is not None:
showIndent(outfile, level)
outfile.write('ref=%s,\n' % quote_python(self.ref).encode(ExternalEncoding))
if self.customerRef is not None:
showIndent(outfile, level)
outfile.write('customerRef=model_.customerRef(\n')
self.customerRef.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.endAddress is not None:
showIndent(outfile, level)
outfile.write('endAddress=%s,\n' % quote_python(self.endAddress).encode(ExternalEncoding))
if self.handle is not None:
showIndent(outfile, level)
outfile.write('handle=%s,\n' % quote_python(self.handle).encode(ExternalEncoding))
if self.name is not None:
showIndent(outfile, level)
outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
if self.nameservers is not None:
showIndent(outfile, level)
outfile.write('nameservers=model_.nameservers(\n')
self.nameservers.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.netBlocks is not None:
showIndent(outfile, level)
outfile.write('netBlocks=model_.netBlocks(\n')
self.netBlocks.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.originASes is not None:
showIndent(outfile, level)
outfile.write('originASes=model_.originASes(\n')
self.originASes.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.pocs is not None:
showIndent(outfile, level)
outfile.write('pocs=model_.pocs(\n')
self.pocs.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.orgRef is not None:
showIndent(outfile, level)
outfile.write('orgRef=model_.orgRef(\n')
self.orgRef.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.comment is not None:
showIndent(outfile, level)
outfile.write('comment=model_.comment(\n')
self.comment.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.parentNetRef is not None:
showIndent(outfile, level)
outfile.write('parentNetRef=model_.parentNetRef(\n')
self.parentNetRef.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.startAddress is not None:
showIndent(outfile, level)
outfile.write('startAddress=%s,\n' % quote_python(self.startAddress).encode(ExternalEncoding))
if self.updateDate is not None:
showIndent(outfile, level)
outfile.write('updateDate=model_.GeneratedsSuper.gds_parse_datetime("%s"),\n' % self.gds_format_datetime(self.updateDate, input_name='updateDate'))
if self.version is not None:
showIndent(outfile, level)
outfile.write('version=%d,\n' % self.version)
if self.anytypeobjs_ is not None:
showIndent(outfile, level)
outfile.write('anytypeobjs_=model_.anytypeobjs_(\n')
self.anytypeobjs_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('termsOfUse', node)
if value is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
self.termsOfUse = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'registrationDate':
sval_ = child_.text
dval_ = self.gds_parse_datetime(sval_)
self.registrationDate = dval_
elif nodeName_ == 'ref':
ref_ = child_.text
ref_ = self.gds_validate_string(ref_, node, 'ref')
self.ref = ref_
elif nodeName_ == 'customerRef':
obj_ = customerRef.factory()
obj_.build(child_)
self.set_customerRef(obj_)
elif nodeName_ == 'endAddress':
endAddress_ = child_.text
endAddress_ = self.gds_validate_string(endAddress_, node, 'endAddress')
self.endAddress = endAddress_
elif nodeName_ == 'handle':
handle_ = child_.text
handle_ = self.gds_validate_string(handle_, node, 'handle')
self.handle = handle_
elif nodeName_ == 'name':
name_ = child_.text
name_ = self.gds_validate_string(name_, node, 'name')
self.name = name_
elif nodeName_ == 'nameservers':
obj_ = nameservers.factory()
obj_.build(child_)
self.set_nameservers(obj_)
elif nodeName_ == 'netBlocks':
obj_ = netBlocks.factory()
obj_.build(child_)
self.set_netBlocks(obj_)
elif nodeName_ == 'originASes':
obj_ = originASes.factory()
obj_.build(child_)
self.set_originASes(obj_)
elif nodeName_ == 'pocs':
obj_ = pocs.factory()
obj_.build(child_)
self.set_pocs(obj_)
elif nodeName_ == 'orgRef':
obj_ = orgRef.factory()
obj_.build(child_)
self.set_orgRef(obj_)
elif nodeName_ == 'comment':
obj_ = comment.factory()
obj_.build(child_)
self.set_comment(obj_)
elif nodeName_ == 'parentNetRef':
obj_ = parentNetRef.factory()
obj_.build(child_)
self.set_parentNetRef(obj_)
elif nodeName_ == 'startAddress':
startAddress_ = child_.text
startAddress_ = self.gds_validate_string(startAddress_, node, 'startAddress')
self.startAddress = startAddress_
elif nodeName_ == 'updateDate':
sval_ = child_.text
dval_ = self.gds_parse_datetime(sval_)
self.updateDate = dval_
elif nodeName_ == 'version':
sval_ = child_.text
try:
ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'version')
self.version = ival_
else:
obj_ = self.gds_build_any(child_, 'net')
if obj_ is not None:
self.set_anytypeobjs_(obj_)
# end class net
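# Illustrative usage of the generated `net` binding (a minimal sketch only; the
# XML file name and the lxml import are assumptions for this example and are not
# part of the generated module): parse a document, hydrate a `net` instance with
# build(), read it through the getters, and serialize it again with export().
#
#   import sys
#   from lxml import etree
#
#   root = etree.parse('net.xml').getroot()
#   obj = net.factory()
#   obj.build(root)
#   print obj.get_handle(), obj.get_startAddress(), obj.get_endAddress()
#   obj.export(sys.stdout, 0, namespace_='v1:')

# Binding for the <customerRef> element: 'handle' and 'name' attributes, with the
# element's text content kept in valueOf_.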
class customerRef(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, handle=None, name=None, valueOf_=None):
self.handle = _cast(None, handle)
self.name = _cast(None, name)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if customerRef.subclass:
return customerRef.subclass(*args_, **kwargs_)
else:
return customerRef(*args_, **kwargs_)
factory = staticmethod(factory)
def get_handle(self): return self.handle
def set_handle(self, handle): self.handle = handle
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='customerRef', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='customerRef')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='customerRef'):
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
outfile.write(' handle=%s' % (self.gds_format_string(quote_attrib(self.handle).encode(ExternalEncoding), input_name='handle'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='v1:', name_='customerRef', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='customerRef'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
showIndent(outfile, level)
outfile.write('handle="%s",\n' % (self.handle,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('handle', node)
if value is not None and 'handle' not in already_processed:
already_processed.add('handle')
self.handle = value
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class customerRef
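# Binding for the <nameservers> container element: holds a list of <nameserver>
# child strings (self.nameserver).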
class nameservers(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, nameserver=None):
if nameserver is None:
self.nameserver = []
else:
self.nameserver = nameserver
def factory(*args_, **kwargs_):
if nameservers.subclass:
return nameservers.subclass(*args_, **kwargs_)
else:
return nameservers(*args_, **kwargs_)
factory = staticmethod(factory)
def get_nameserver(self): return self.nameserver
def set_nameserver(self, nameserver): self.nameserver = nameserver
def add_nameserver(self, value): self.nameserver.append(value)
def insert_nameserver(self, index, value): self.nameserver[index] = value
def hasContent_(self):
if (
self.nameserver
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='nameservers', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='nameservers')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='nameservers'):
pass
def exportChildren(self, outfile, level, namespace_='v1:', name_='nameservers', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for nameserver_ in self.nameserver:
showIndent(outfile, level, pretty_print)
outfile.write('<%snameserver>%s</%snameserver>%s' % (namespace_, self.gds_format_string(quote_xml(nameserver_).encode(ExternalEncoding), input_name='nameserver'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='nameservers'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('nameserver=[\n')
level += 1
for nameserver_ in self.nameserver:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(nameserver_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'nameserver':
nameserver_ = child_.text
nameserver_ = self.gds_validate_string(nameserver_, node, 'nameserver')
self.nameserver.append(nameserver_)
# end class nameservers
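# Binding for the <netBlocks> container element: holds a list of <netBlock>
# child objects.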
class netBlocks(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, netBlock=None):
if netBlock is None:
self.netBlock = []
else:
self.netBlock = netBlock
def factory(*args_, **kwargs_):
if netBlocks.subclass:
return netBlocks.subclass(*args_, **kwargs_)
else:
return netBlocks(*args_, **kwargs_)
factory = staticmethod(factory)
def get_netBlock(self): return self.netBlock
def set_netBlock(self, netBlock): self.netBlock = netBlock
def add_netBlock(self, value): self.netBlock.append(value)
def insert_netBlock(self, index, value): self.netBlock[index] = value
def hasContent_(self):
if (
self.netBlock
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='netBlocks', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='netBlocks')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='netBlocks'):
pass
def exportChildren(self, outfile, level, namespace_='v1:', name_='netBlocks', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for netBlock_ in self.netBlock:
netBlock_.export(outfile, level, namespace_, name_='netBlock', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='netBlocks'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('netBlock=[\n')
level += 1
for netBlock_ in self.netBlock:
showIndent(outfile, level)
outfile.write('model_.netBlock(\n')
netBlock_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'netBlock':
obj_ = netBlock.factory()
obj_.build(child_)
self.netBlock.append(obj_)
# end class netBlocks
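# Binding for the <originASes> container element: holds a list of <originAS>
# child strings.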
class originASes(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, originAS=None):
if originAS is None:
self.originAS = []
else:
self.originAS = originAS
def factory(*args_, **kwargs_):
if originASes.subclass:
return originASes.subclass(*args_, **kwargs_)
else:
return originASes(*args_, **kwargs_)
factory = staticmethod(factory)
def get_originAS(self): return self.originAS
def set_originAS(self, originAS): self.originAS = originAS
def add_originAS(self, value): self.originAS.append(value)
def insert_originAS(self, index, value): self.originAS[index] = value
def hasContent_(self):
if (
self.originAS
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='originASes', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='originASes')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='originASes'):
pass
def exportChildren(self, outfile, level, namespace_='v1:', name_='originASes', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for originAS_ in self.originAS:
showIndent(outfile, level, pretty_print)
outfile.write('<%soriginAS>%s</%soriginAS>%s' % (namespace_, self.gds_format_string(quote_xml(originAS_).encode(ExternalEncoding), input_name='originAS'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='originASes'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('originAS=[\n')
level += 1
for originAS_ in self.originAS:
showIndent(outfile, level)
outfile.write('%s,\n' % quote_python(originAS_).encode(ExternalEncoding))
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'originAS':
originAS_ = child_.text
originAS_ = self.gds_validate_string(originAS_, node, 'originAS')
self.originAS.append(originAS_)
# end class originASes
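# Binding for the <pocs> element: optional limitExceeded, pocRef and pocLinkRef
# children plus a termsOfUse attribute.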
class pocs(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, termsOfUse=None, limitExceeded=None, pocRef=None, pocLinkRef=None, anytypeobjs_=None):
self.termsOfUse = _cast(None, termsOfUse)
self.limitExceeded = limitExceeded
self.pocRef = pocRef
self.pocLinkRef = pocLinkRef
self.anytypeobjs_ = anytypeobjs_
def factory(*args_, **kwargs_):
if pocs.subclass:
return pocs.subclass(*args_, **kwargs_)
else:
return pocs(*args_, **kwargs_)
factory = staticmethod(factory)
def get_limitExceeded(self): return self.limitExceeded
def set_limitExceeded(self, limitExceeded): self.limitExceeded = limitExceeded
def get_pocRef(self): return self.pocRef
def set_pocRef(self, pocRef): self.pocRef = pocRef
def get_pocLinkRef(self): return self.pocLinkRef
def set_pocLinkRef(self, pocLinkRef): self.pocLinkRef = pocLinkRef
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def get_termsOfUse(self): return self.termsOfUse
def set_termsOfUse(self, termsOfUse): self.termsOfUse = termsOfUse
def hasContent_(self):
if (
self.limitExceeded is not None or
self.pocRef is not None or
self.pocLinkRef is not None or
self.anytypeobjs_ is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='pocs', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='pocs')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='pocs'):
if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
outfile.write(' termsOfUse=%s' % (self.gds_format_string(quote_attrib(self.termsOfUse).encode(ExternalEncoding), input_name='termsOfUse'), ))
def exportChildren(self, outfile, level, namespace_='v1:', name_='pocs', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.limitExceeded is not None:
self.limitExceeded.export(outfile, level, namespace_, name_='limitExceeded', pretty_print=pretty_print)
if self.pocRef is not None:
self.pocRef.export(outfile, level, namespace_, name_='pocRef', pretty_print=pretty_print)
if self.pocLinkRef is not None:
self.pocLinkRef.export(outfile, level, namespace_, name_='pocLinkRef', pretty_print=pretty_print)
if self.anytypeobjs_ is not None:
self.anytypeobjs_.export(outfile, level, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='pocs'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
showIndent(outfile, level)
outfile.write('termsOfUse="%s",\n' % (self.termsOfUse,))
def exportLiteralChildren(self, outfile, level, name_):
if self.limitExceeded is not None:
showIndent(outfile, level)
outfile.write('limitExceeded=model_.limitExceeded(\n')
self.limitExceeded.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.pocRef is not None:
showIndent(outfile, level)
outfile.write('pocRef=model_.pocRef(\n')
self.pocRef.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.pocLinkRef is not None:
showIndent(outfile, level)
outfile.write('pocLinkRef=model_.pocLinkRef(\n')
self.pocLinkRef.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.anytypeobjs_ is not None:
showIndent(outfile, level)
outfile.write('anytypeobjs_=model_.anytypeobjs_(\n')
self.anytypeobjs_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('termsOfUse', node)
if value is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
self.termsOfUse = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'limitExceeded':
obj_ = limitExceeded.factory()
obj_.build(child_)
self.set_limitExceeded(obj_)
elif nodeName_ == 'pocRef':
obj_ = pocRef.factory()
obj_.build(child_)
self.set_pocRef(obj_)
elif nodeName_ == 'pocLinkRef':
obj_ = pocLinkRef.factory()
obj_.build(child_)
self.set_pocLinkRef(obj_)
else:
obj_ = self.gds_build_any(child_, 'pocs')
if obj_ is not None:
self.set_anytypeobjs_(obj_)
# end class pocs
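# Binding for the <orgRef> element: 'handle' and 'name' attributes, with the
# element's text content kept in valueOf_.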
class orgRef(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, handle=None, name=None, valueOf_=None):
self.handle = _cast(None, handle)
self.name = _cast(None, name)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if orgRef.subclass:
return orgRef.subclass(*args_, **kwargs_)
else:
return orgRef(*args_, **kwargs_)
factory = staticmethod(factory)
def get_handle(self): return self.handle
def set_handle(self, handle): self.handle = handle
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='orgRef', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='orgRef')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='orgRef'):
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
outfile.write(' handle=%s' % (self.gds_format_string(quote_attrib(self.handle).encode(ExternalEncoding), input_name='handle'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='v1:', name_='orgRef', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='orgRef'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
showIndent(outfile, level)
outfile.write('handle="%s",\n' % (self.handle,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('handle', node)
if value is not None and 'handle' not in already_processed:
already_processed.add('handle')
self.handle = value
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class orgRef
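# Binding for the <comment> element: a list of <line> child objects.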
class comment(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, line=None):
if line is None:
self.line = []
else:
self.line = line
def factory(*args_, **kwargs_):
if comment.subclass:
return comment.subclass(*args_, **kwargs_)
else:
return comment(*args_, **kwargs_)
factory = staticmethod(factory)
def get_line(self): return self.line
def set_line(self, line): self.line = line
def add_line(self, value): self.line.append(value)
def insert_line(self, index, value): self.line[index] = value
def hasContent_(self):
if (
self.line
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='comment', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='comment')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='comment'):
pass
def exportChildren(self, outfile, level, namespace_='v1:', name_='comment', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for line_ in self.line:
line_.export(outfile, level, namespace_, name_='line', pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='comment'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
showIndent(outfile, level)
outfile.write('line=[\n')
level += 1
for line_ in self.line:
showIndent(outfile, level)
outfile.write('model_.line(\n')
line_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'line':
obj_ = line.factory()
obj_.build(child_)
self.line.append(obj_)
# end class comment
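# Binding for the <parentNetRef> element: 'handle' and 'name' attributes, with
# the element's text content kept in valueOf_.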
class parentNetRef(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, handle=None, name=None, valueOf_=None):
self.handle = _cast(None, handle)
self.name = _cast(None, name)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if parentNetRef.subclass:
return parentNetRef.subclass(*args_, **kwargs_)
else:
return parentNetRef(*args_, **kwargs_)
factory = staticmethod(factory)
def get_handle(self): return self.handle
def set_handle(self, handle): self.handle = handle
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='parentNetRef', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='parentNetRef')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='parentNetRef'):
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
outfile.write(' handle=%s' % (self.gds_format_string(quote_attrib(self.handle).encode(ExternalEncoding), input_name='handle'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='v1:', name_='parentNetRef', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='parentNetRef'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
showIndent(outfile, level)
outfile.write('handle="%s",\n' % (self.handle,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('handle', node)
if value is not None and 'handle' not in already_processed:
already_processed.add('handle')
self.handle = value
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class parentNetRef
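# Binding for a single <netBlock> element: ref, addressRange, netRef, cidrLength,
# endAddress, description, type and startAddress children plus a termsOfUse attribute.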
class netBlock(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, termsOfUse=None, ref=None, addressRange=None, netRef=None, cidrLength=None, endAddress=None, description=None, type_=None, startAddress=None, anytypeobjs_=None):
self.termsOfUse = _cast(None, termsOfUse)
self.ref = ref
self.addressRange = addressRange
self.netRef = netRef
self.cidrLength = cidrLength
self.endAddress = endAddress
self.description = description
self.type_ = type_
self.startAddress = startAddress
self.anytypeobjs_ = anytypeobjs_
def factory(*args_, **kwargs_):
if netBlock.subclass:
return netBlock.subclass(*args_, **kwargs_)
else:
return netBlock(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ref(self): return self.ref
def set_ref(self, ref): self.ref = ref
def get_addressRange(self): return self.addressRange
def set_addressRange(self, addressRange): self.addressRange = addressRange
def get_netRef(self): return self.netRef
def set_netRef(self, netRef): self.netRef = netRef
def get_cidrLength(self): return self.cidrLength
def set_cidrLength(self, cidrLength): self.cidrLength = cidrLength
def get_endAddress(self): return self.endAddress
def set_endAddress(self, endAddress): self.endAddress = endAddress
def get_description(self): return self.description
def set_description(self, description): self.description = description
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_startAddress(self): return self.startAddress
def set_startAddress(self, startAddress): self.startAddress = startAddress
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def get_termsOfUse(self): return self.termsOfUse
def set_termsOfUse(self, termsOfUse): self.termsOfUse = termsOfUse
def hasContent_(self):
if (
self.ref is not None or
self.addressRange is not None or
self.netRef is not None or
self.cidrLength is not None or
self.endAddress is not None or
self.description is not None or
self.type_ is not None or
self.startAddress is not None or
self.anytypeobjs_ is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='netBlock', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='netBlock')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='netBlock'):
if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
outfile.write(' termsOfUse=%s' % (self.gds_format_string(quote_attrib(self.termsOfUse).encode(ExternalEncoding), input_name='termsOfUse'), ))
def exportChildren(self, outfile, level, namespace_='v1:', name_='netBlock', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.ref is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sref>%s</%sref>%s' % (namespace_, self.gds_format_string(quote_xml(self.ref).encode(ExternalEncoding), input_name='ref'), namespace_, eol_))
if self.addressRange is not None:
self.addressRange.export(outfile, level, namespace_, name_='addressRange', pretty_print=pretty_print)
if self.netRef is not None:
self.netRef.export(outfile, level, namespace_, name_='netRef', pretty_print=pretty_print)
if self.cidrLength is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scidrLength>%s</%scidrLength>%s' % (namespace_, self.gds_format_integer(self.cidrLength, input_name='cidrLength'), namespace_, eol_))
if self.endAddress is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sendAddress>%s</%sendAddress>%s' % (namespace_, self.gds_format_string(quote_xml(self.endAddress).encode(ExternalEncoding), input_name='endAddress'), namespace_, eol_))
if self.description is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdescription>%s</%sdescription>%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_))
if self.type_ is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stype>%s</%stype>%s' % (namespace_, self.gds_format_string(quote_xml(self.type_).encode(ExternalEncoding), input_name='type'), namespace_, eol_))
if self.startAddress is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sstartAddress>%s</%sstartAddress>%s' % (namespace_, self.gds_format_string(quote_xml(self.startAddress).encode(ExternalEncoding), input_name='startAddress'), namespace_, eol_))
if self.anytypeobjs_ is not None:
self.anytypeobjs_.export(outfile, level, namespace_, pretty_print=pretty_print)
def exportLiteral(self, outfile, level, name_='netBlock'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.termsOfUse is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
showIndent(outfile, level)
outfile.write('termsOfUse="%s",\n' % (self.termsOfUse,))
def exportLiteralChildren(self, outfile, level, name_):
if self.ref is not None:
showIndent(outfile, level)
outfile.write('ref=%s,\n' % quote_python(self.ref).encode(ExternalEncoding))
if self.addressRange is not None:
showIndent(outfile, level)
outfile.write('addressRange=model_.addressRange(\n')
self.addressRange.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.netRef is not None:
showIndent(outfile, level)
outfile.write('netRef=model_.netRef(\n')
self.netRef.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.cidrLength is not None:
showIndent(outfile, level)
outfile.write('cidrLength=%d,\n' % self.cidrLength)
if self.endAddress is not None:
showIndent(outfile, level)
outfile.write('endAddress=%s,\n' % quote_python(self.endAddress).encode(ExternalEncoding))
if self.description is not None:
showIndent(outfile, level)
outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding))
if self.type_ is not None:
showIndent(outfile, level)
outfile.write('type_=%s,\n' % quote_python(self.type_).encode(ExternalEncoding))
if self.startAddress is not None:
showIndent(outfile, level)
outfile.write('startAddress=%s,\n' % quote_python(self.startAddress).encode(ExternalEncoding))
if self.anytypeobjs_ is not None:
showIndent(outfile, level)
outfile.write('anytypeobjs_=model_.anytypeobjs_(\n')
self.anytypeobjs_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('termsOfUse', node)
if value is not None and 'termsOfUse' not in already_processed:
already_processed.add('termsOfUse')
self.termsOfUse = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'ref':
ref_ = child_.text
ref_ = self.gds_validate_string(ref_, node, 'ref')
self.ref = ref_
elif nodeName_ == 'addressRange':
obj_ = addressRange.factory()
obj_.build(child_)
self.set_addressRange(obj_)
elif nodeName_ == 'netRef':
obj_ = netRef.factory()
obj_.build(child_)
self.set_netRef(obj_)
elif nodeName_ == 'cidrLength':
sval_ = child_.text
try:
ival_ = int(sval_)
            except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'cidrLength')
self.cidrLength = ival_
elif nodeName_ == 'endAddress':
endAddress_ = child_.text
endAddress_ = self.gds_validate_string(endAddress_, node, 'endAddress')
self.endAddress = endAddress_
elif nodeName_ == 'description':
description_ = child_.text
description_ = self.gds_validate_string(description_, node, 'description')
self.description = description_
elif nodeName_ == 'type':
type_ = child_.text
type_ = self.gds_validate_string(type_, node, 'type')
self.type_ = type_
elif nodeName_ == 'startAddress':
startAddress_ = child_.text
startAddress_ = self.gds_validate_string(startAddress_, node, 'startAddress')
self.startAddress = startAddress_
else:
obj_ = self.gds_build_any(child_, 'netBlock')
if obj_ is not None:
self.set_anytypeobjs_(obj_)
# end class netBlock
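# Binding for the <limitExceeded> element: integer 'limit' attribute, with the
# element's text content kept in valueOf_.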
class limitExceeded(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, limit=None, valueOf_=None):
self.limit = _cast(int, limit)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if limitExceeded.subclass:
return limitExceeded.subclass(*args_, **kwargs_)
else:
return limitExceeded(*args_, **kwargs_)
factory = staticmethod(factory)
def get_limit(self): return self.limit
def set_limit(self, limit): self.limit = limit
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='limitExceeded', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='limitExceeded')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='limitExceeded'):
if self.limit is not None and 'limit' not in already_processed:
already_processed.add('limit')
outfile.write(' limit="%s"' % self.gds_format_integer(self.limit, input_name='limit'))
def exportChildren(self, outfile, level, namespace_='v1:', name_='limitExceeded', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='limitExceeded'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.limit is not None and 'limit' not in already_processed:
already_processed.add('limit')
showIndent(outfile, level)
outfile.write('limit=%d,\n' % (self.limit,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('limit', node)
if value is not None and 'limit' not in already_processed:
already_processed.add('limit')
try:
self.limit = int(value)
            except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class limitExceeded
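# Binding for the <pocRef> element: 'handle' and 'name' attributes, with the
# element's text content kept in valueOf_.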
class pocRef(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, handle=None, name=None, valueOf_=None):
self.handle = _cast(None, handle)
self.name = _cast(None, name)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if pocRef.subclass:
return pocRef.subclass(*args_, **kwargs_)
else:
return pocRef(*args_, **kwargs_)
factory = staticmethod(factory)
def get_handle(self): return self.handle
def set_handle(self, handle): self.handle = handle
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='pocRef', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='pocRef')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='pocRef'):
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
outfile.write(' handle=%s' % (self.gds_format_string(quote_attrib(self.handle).encode(ExternalEncoding), input_name='handle'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='v1:', name_='pocRef', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='pocRef'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
showIndent(outfile, level)
outfile.write('handle="%s",\n' % (self.handle,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('handle', node)
if value is not None and 'handle' not in already_processed:
already_processed.add('handle')
self.handle = value
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class pocRef
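# Binding for the <pocLinkRef> element: 'function', 'handle' and 'description'
# attributes, with the element's text content kept in valueOf_.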
class pocLinkRef(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, function=None, handle=None, description=None, valueOf_=None):
self.function = _cast(None, function)
self.handle = _cast(None, handle)
self.description = _cast(None, description)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if pocLinkRef.subclass:
return pocLinkRef.subclass(*args_, **kwargs_)
else:
return pocLinkRef(*args_, **kwargs_)
factory = staticmethod(factory)
def get_function(self): return self.function
def set_function(self, function): self.function = function
def get_handle(self): return self.handle
def set_handle(self, handle): self.handle = handle
def get_description(self): return self.description
def set_description(self, description): self.description = description
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='pocLinkRef', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='pocLinkRef')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='pocLinkRef'):
if self.function is not None and 'function' not in already_processed:
already_processed.add('function')
outfile.write(' function=%s' % (self.gds_format_string(quote_attrib(self.function).encode(ExternalEncoding), input_name='function'), ))
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
outfile.write(' handle=%s' % (self.gds_format_string(quote_attrib(self.handle).encode(ExternalEncoding), input_name='handle'), ))
if self.description is not None and 'description' not in already_processed:
already_processed.add('description')
outfile.write(' description=%s' % (self.gds_format_string(quote_attrib(self.description).encode(ExternalEncoding), input_name='description'), ))
def exportChildren(self, outfile, level, namespace_='v1:', name_='pocLinkRef', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='pocLinkRef'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.function is not None and 'function' not in already_processed:
already_processed.add('function')
showIndent(outfile, level)
outfile.write('function="%s",\n' % (self.function,))
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
showIndent(outfile, level)
outfile.write('handle="%s",\n' % (self.handle,))
if self.description is not None and 'description' not in already_processed:
already_processed.add('description')
showIndent(outfile, level)
outfile.write('description="%s",\n' % (self.description,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('function', node)
if value is not None and 'function' not in already_processed:
already_processed.add('function')
self.function = value
value = find_attr_value_('handle', node)
if value is not None and 'handle' not in already_processed:
already_processed.add('handle')
self.handle = value
value = find_attr_value_('description', node)
if value is not None and 'description' not in already_processed:
already_processed.add('description')
self.description = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class pocLinkRef
class line(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, number=None, valueOf_=None, mixedclass_=None, content_=None):
self.number = _cast(int, number)
self.valueOf_ = valueOf_
if mixedclass_ is None:
self.mixedclass_ = MixedContainer
else:
self.mixedclass_ = mixedclass_
if content_ is None:
self.content_ = []
else:
self.content_ = content_
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if line.subclass:
return line.subclass(*args_, **kwargs_)
else:
return line(*args_, **kwargs_)
factory = staticmethod(factory)
def get_number(self): return self.number
def set_number(self, number): self.number = number
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='line', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='line')
outfile.write('>')
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='line'):
if self.number is not None and 'number' not in already_processed:
already_processed.add('number')
outfile.write(' number="%s"' % self.gds_format_integer(self.number, input_name='number'))
def exportChildren(self, outfile, level, namespace_='v1:', name_='line', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='line'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.number is not None and 'number' not in already_processed:
already_processed.add('number')
showIndent(outfile, level)
outfile.write('number=%d,\n' % (self.number,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
if node.text is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', node.text)
self.content_.append(obj_)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('number', node)
if value is not None and 'number' not in already_processed:
already_processed.add('number')
try:
self.number = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if not fromsubclass_ and child_.tail is not None:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.tail)
self.content_.append(obj_)
pass
# end class line
class addressRange(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, cidrLength=None, endAddress=None, description=None, type_=None, startAddress=None):
self.cidrLength = cidrLength
self.endAddress = endAddress
self.description = description
self.type_ = type_
self.startAddress = startAddress
def factory(*args_, **kwargs_):
if addressRange.subclass:
return addressRange.subclass(*args_, **kwargs_)
else:
return addressRange(*args_, **kwargs_)
factory = staticmethod(factory)
def get_cidrLength(self): return self.cidrLength
def set_cidrLength(self, cidrLength): self.cidrLength = cidrLength
def get_endAddress(self): return self.endAddress
def set_endAddress(self, endAddress): self.endAddress = endAddress
def get_description(self): return self.description
def set_description(self, description): self.description = description
def get_type(self): return self.type_
def set_type(self, type_): self.type_ = type_
def get_startAddress(self): return self.startAddress
def set_startAddress(self, startAddress): self.startAddress = startAddress
def hasContent_(self):
if (
self.cidrLength is not None or
self.endAddress is not None or
self.description is not None or
self.type_ is not None or
self.startAddress is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='addressRange', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='addressRange')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='addressRange'):
pass
def exportChildren(self, outfile, level, namespace_='v1:', name_='addressRange', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.cidrLength is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%scidrLength>%s</%scidrLength>%s' % (namespace_, self.gds_format_integer(self.cidrLength, input_name='cidrLength'), namespace_, eol_))
if self.endAddress is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sendAddress>%s</%sendAddress>%s' % (namespace_, self.gds_format_string(quote_xml(self.endAddress).encode(ExternalEncoding), input_name='endAddress'), namespace_, eol_))
if self.description is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sdescription>%s</%sdescription>%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_))
if self.type_ is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%stype>%s</%stype>%s' % (namespace_, self.gds_format_string(quote_xml(self.type_).encode(ExternalEncoding), input_name='type'), namespace_, eol_))
if self.startAddress is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<%sstartAddress>%s</%sstartAddress>%s' % (namespace_, self.gds_format_string(quote_xml(self.startAddress).encode(ExternalEncoding), input_name='startAddress'), namespace_, eol_))
def exportLiteral(self, outfile, level, name_='addressRange'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
pass
def exportLiteralChildren(self, outfile, level, name_):
if self.cidrLength is not None:
showIndent(outfile, level)
outfile.write('cidrLength=%d,\n' % self.cidrLength)
if self.endAddress is not None:
showIndent(outfile, level)
outfile.write('endAddress=%s,\n' % quote_python(self.endAddress).encode(ExternalEncoding))
if self.description is not None:
showIndent(outfile, level)
outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding))
if self.type_ is not None:
showIndent(outfile, level)
outfile.write('type_=%s,\n' % quote_python(self.type_).encode(ExternalEncoding))
if self.startAddress is not None:
showIndent(outfile, level)
outfile.write('startAddress=%s,\n' % quote_python(self.startAddress).encode(ExternalEncoding))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'cidrLength':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'cidrLength')
self.cidrLength = ival_
elif nodeName_ == 'endAddress':
endAddress_ = child_.text
endAddress_ = self.gds_validate_string(endAddress_, node, 'endAddress')
self.endAddress = endAddress_
elif nodeName_ == 'description':
description_ = child_.text
description_ = self.gds_validate_string(description_, node, 'description')
self.description = description_
elif nodeName_ == 'type':
type_ = child_.text
type_ = self.gds_validate_string(type_, node, 'type')
self.type_ = type_
elif nodeName_ == 'startAddress':
startAddress_ = child_.text
startAddress_ = self.gds_validate_string(startAddress_, node, 'startAddress')
self.startAddress = startAddress_
# end class addressRange
class netRef(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, startAddress=None, endAddress=None, handle=None, name=None, valueOf_=None):
self.startAddress = _cast(None, startAddress)
self.endAddress = _cast(None, endAddress)
self.handle = _cast(None, handle)
self.name = _cast(None, name)
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if netRef.subclass:
return netRef.subclass(*args_, **kwargs_)
else:
return netRef(*args_, **kwargs_)
factory = staticmethod(factory)
def get_startAddress(self): return self.startAddress
def set_startAddress(self, startAddress): self.startAddress = startAddress
def get_endAddress(self): return self.endAddress
def set_endAddress(self, endAddress): self.endAddress = endAddress
def get_handle(self): return self.handle
def set_handle(self, handle): self.handle = handle
def get_name(self): return self.name
def set_name(self, name): self.name = name
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
def export(self, outfile, level, namespace_='v1:', name_='netRef', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='netRef')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='v1:', name_='netRef'):
if self.startAddress is not None and 'startAddress' not in already_processed:
already_processed.add('startAddress')
outfile.write(' startAddress=%s' % (self.gds_format_string(quote_attrib(self.startAddress).encode(ExternalEncoding), input_name='startAddress'), ))
if self.endAddress is not None and 'endAddress' not in already_processed:
already_processed.add('endAddress')
outfile.write(' endAddress=%s' % (self.gds_format_string(quote_attrib(self.endAddress).encode(ExternalEncoding), input_name='endAddress'), ))
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
outfile.write(' handle=%s' % (self.gds_format_string(quote_attrib(self.handle).encode(ExternalEncoding), input_name='handle'), ))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
def exportChildren(self, outfile, level, namespace_='v1:', name_='netRef', fromsubclass_=False, pretty_print=True):
pass
def exportLiteral(self, outfile, level, name_='netRef'):
level += 1
already_processed = set()
self.exportLiteralAttributes(outfile, level, already_processed, name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.startAddress is not None and 'startAddress' not in already_processed:
already_processed.add('startAddress')
showIndent(outfile, level)
outfile.write('startAddress="%s",\n' % (self.startAddress,))
if self.endAddress is not None and 'endAddress' not in already_processed:
already_processed.add('endAddress')
showIndent(outfile, level)
outfile.write('endAddress="%s",\n' % (self.endAddress,))
if self.handle is not None and 'handle' not in already_processed:
already_processed.add('handle')
showIndent(outfile, level)
outfile.write('handle="%s",\n' % (self.handle,))
if self.name is not None and 'name' not in already_processed:
already_processed.add('name')
showIndent(outfile, level)
outfile.write('name="%s",\n' % (self.name,))
def exportLiteralChildren(self, outfile, level, name_):
pass
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('startAddress', node)
if value is not None and 'startAddress' not in already_processed:
already_processed.add('startAddress')
self.startAddress = value
value = find_attr_value_('endAddress', node)
if value is not None and 'endAddress' not in already_processed:
already_processed.add('endAddress')
self.endAddress = value
value = find_attr_value_('handle', node)
if value is not None and 'handle' not in already_processed:
already_processed.add('handle')
self.handle = value
value = find_attr_value_('name', node)
if value is not None and 'name' not in already_processed:
already_processed.add('name')
self.name = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class netRef
GDSClassesMapping = {
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'net'
rootClass = net
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='xmlns="http://www.arin.net/whoisrws/core/v1"',
pretty_print=True)
return rootObj
def parseEtree(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'net'
rootClass = net
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
mapping = {}
rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
content = etree_.tostring(
rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
roots = get_root_tag(rootNode)
rootClass = roots[1]
if rootClass is None:
rootClass = net
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_="net",
namespacedef_='xmlns="http://www.arin.net/whoisrws/core/v1"')
return rootObj
def parseLiteral(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'net'
rootClass = net
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('#from network import *\n\n')
sys.stdout.write('import network as model_\n\n')
sys.stdout.write('rootObj = model_.rootTag(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"addressRange",
"comment",
"customerRef",
"limitExceeded",
"line",
"nameservers",
"net",
"netBlock",
"netBlocks",
"netRef",
"orgRef",
"originASes",
"parentNetRef",
"pocLinkRef",
"pocRef",
"pocs"
]
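# Illustrative sketch (not part of the generated bindings): parse() above reads
# a WHOIS-RWS XML document, echoes the re-serialized XML to stdout and returns
# the object tree, so programmatic callers only need the returned root object.
# The file name below is hypothetical.
def _example_parse(in_file='net_payload.xml'):
    root_obj = parse(in_file)
    return root_obj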
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectivityInformation(Model):
"""Information on the connectivity status.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar hops: List of hops between the source and the destination.
:vartype hops:
list[~azure.mgmt.network.v2017_03_01.models.ConnectivityHop]
:ivar connection_status: The connection status. Possible values include:
'Unknown', 'Connected', 'Disconnected', 'Degraded'
:vartype connection_status: str or
~azure.mgmt.network.v2017_03_01.models.ConnectionStatus
:ivar avg_latency_in_ms: Average latency in milliseconds.
:vartype avg_latency_in_ms: int
:ivar min_latency_in_ms: Minimum latency in milliseconds.
:vartype min_latency_in_ms: int
:ivar max_latency_in_ms: Maximum latency in milliseconds.
:vartype max_latency_in_ms: int
:ivar probes_sent: Total number of probes sent.
:vartype probes_sent: int
:ivar probes_failed: Number of failed probes.
:vartype probes_failed: int
"""
_validation = {
'hops': {'readonly': True},
'connection_status': {'readonly': True},
'avg_latency_in_ms': {'readonly': True},
'min_latency_in_ms': {'readonly': True},
'max_latency_in_ms': {'readonly': True},
'probes_sent': {'readonly': True},
'probes_failed': {'readonly': True},
}
_attribute_map = {
'hops': {'key': 'hops', 'type': '[ConnectivityHop]'},
'connection_status': {'key': 'connectionStatus', 'type': 'str'},
'avg_latency_in_ms': {'key': 'avgLatencyInMs', 'type': 'int'},
'min_latency_in_ms': {'key': 'minLatencyInMs', 'type': 'int'},
'max_latency_in_ms': {'key': 'maxLatencyInMs', 'type': 'int'},
'probes_sent': {'key': 'probesSent', 'type': 'int'},
'probes_failed': {'key': 'probesFailed', 'type': 'int'},
}
def __init__(self):
self.hops = None
self.connection_status = None
self.avg_latency_in_ms = None
self.min_latency_in_ms = None
self.max_latency_in_ms = None
self.probes_sent = None
self.probes_failed = None
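# Illustrative sketch (not part of the generated model): every field above is
# marked read-only, so it is populated during deserialization of a service
# response rather than set by the caller.  `result` stands for an already
# obtained ConnectivityInformation instance (hypothetical variable name).
def _summarize_connectivity(result):
    # Build a short human-readable summary from the server-populated fields.
    return '{0}: {1}/{2} probes failed, avg latency {3} ms'.format(
        result.connection_status, result.probes_failed,
        result.probes_sent, result.avg_latency_in_ms)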
|
print(int(int(input())**.25))
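# The one-liner above reads an integer from stdin and prints its fourth root,
# truncated to an integer; float exponentiation keeps this exact only for
# inputs small enough that rounding does not shift the truncated result.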
|
"""Per-prefix data, mapping each prefix to a dict of locale:name.
Auto-generated file, do not edit by hand.
"""
from ..util import u
# Copyright (C) 2011-2020 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
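# Illustrative sketch (not part of the auto-generated data): lookups into this
# table are longest-prefix matches on the full digit string (country code plus
# national number), e.g. '12423571234' matches the key '1242357' and resolves
# to 'BaTelCo'.  The helper below is a hedged example of that idea, not the
# library's actual implementation.
def _longest_prefix_name(digits, lang='en'):
    # Try the longest candidate prefix first, then progressively shorter ones.
    for length in range(len(digits), 0, -1):
        entry = data.get(digits[:length])
        if entry and lang in entry:
            return entry[lang]
    return None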
data = {
'1242357':{'en': 'BaTelCo'},
'1242359':{'en': 'BaTelCo'},
'1242375':{'en': 'BaTelCo'},
'1242376':{'en': 'BaTelCo'},
'1242395':{'en': 'BaTelCo'},
'124242':{'en': 'BaTelCo'},
'124243':{'en': 'BaTelCo'},
'124244':{'en': 'BaTelCo'},
'124245':{'en': 'BaTelCo'},
'1242462':{'en': 'BaTelCo'},
'1242463':{'en': 'BaTelCo'},
'1242464':{'en': 'BaTelCo'},
'1242465':{'en': 'BaTelCo'},
'1242466':{'en': 'BaTelCo'},
'1242467':{'en': 'BaTelCo'},
'1242468':{'en': 'BaTelCo'},
'124247':{'en': 'BaTelCo'},
'124248':{'en': 'BaTelCo'},
'124252':{'en': 'BaTelCo'},
'124253':{'en': 'BaTelCo'},
'124254':{'en': 'BaTelCo'},
'124255':{'en': 'BaTelCo'},
'124256':{'en': 'BaTelCo'},
'124257':{'en': 'BaTelCo'},
'124263':{'en': 'BaTelCo'},
'1242646':{'en': 'BaTelCo'},
'124272':{'en': 'BaTelCo'},
'124273':{'en': 'aliv'},
'12428':{'en': 'aliv'},
'124623':{'en': 'LIME'},
'124624':{'en': 'LIME'},
'124625':{'en': 'LIME'},
'1246256':{'en': 'Digicel'},
'1246257':{'en': 'Digicel'},
'1246258':{'en': 'Digicel'},
'1246259':{'en': 'Digicel'},
'124626':{'en': 'Digicel'},
'124628':{'en': 'Cable & Wireless'},
'124645':{'en': 'Sunbeach Communications'},
'124669':{'en': 'Ozone'},
'12468':{'en': 'Digicel'},
'1264469':{'en': 'Cable & Wireless'},
'126453':{'en': 'Weblinks Limited'},
'126458':{'en': 'Digicel'},
'1264729':{'en': 'Cable & Wireless'},
'126477':{'en': 'Cable & Wireless'},
'126871':{'en': 'Digicel'},
'1268720':{'en': 'Digicel'},
'1268721':{'en': 'Digicel'},
'1268722':{'en': 'Digicel'},
'1268724':{'en': 'Digicel'},
'1268725':{'en': 'Digicel'},
'1268726':{'en': 'Digicel'},
'1268727':{'en': 'APUA'},
'1268729':{'en': 'APUA'},
'1268730':{'en': 'APUA'},
'1268732':{'en': 'Digicel'},
'1268734':{'en': 'Digicel'},
'1268736':{'en': 'Digicel'},
'1268773':{'en': 'APUA'},
'1268774':{'en': 'APUA'},
'1268775':{'en': 'APUA'},
'1268780':{'en': 'APUA'},
'1268781':{'en': 'APUA'},
'1268783':{'en': 'Digicel'},
'1268785':{'en': 'Digicel'},
'1268787':{'en': 'Cable & Wireless'},
'1268788':{'en': 'Digicel'},
'128424':{'en': 'Cable & Wireless'},
'1284300':{'en': 'Digicel'},
'128434':{'en': 'Digicel'},
'128436':{'en': 'Digicel'},
'128439':{'en': 'Digicel'},
'128444':{'en': 'CCT'},
'12844689':{'en': 'CCT'},
'12844966':{'en': 'CCT'},
'12844967':{'en': 'CCT'},
'12844968':{'en': 'CCT'},
'12844969':{'en': 'CCT'},
'1284499':{'en': 'CCT'},
'1284546':{'en': 'Cable & Wireless'},
'128456':{'en': 'Cable & Wireless'},
'128459':{'en': 'Cable & Wireless'},
'1340423':{'en': 'Vitelcom Cellular'},
'134044':{'en': 'GIGSKY Mobile'},
'1340725':{'en': 'Vitelcom Cellular'},
'134532':{'en': 'Digicel'},
'134542':{'en': 'Digicel'},
'134551':{'en': 'Digicel'},
'134552':{'en': 'Digicel'},
'134554':{'en': 'Digicel'},
'134555':{'en': 'Digicel'},
'1345649':{'en': 'Digicel'},
'1345919':{'en': 'Cable & Wireless'},
'1345930':{'en': 'LIME'},
'1345936':{'en': 'Cable & Wireless'},
'1345937':{'en': 'Cable & Wireless'},
'1345938':{'en': 'Cable & Wireless'},
'1345939':{'en': 'Cable & Wireless'},
'134599':{'en': 'Cable & Wireless'},
'14412':{'en': 'Cellular One'},
'14413':{'en': 'Mobility'},
'144150':{'en': 'Digicel Bermuda'},
'144151':{'en': 'Digicel Bermuda'},
'144152':{'en': 'Digicel Bermuda'},
'144153':{'en': 'Digicel Bermuda'},
'144159':{'en': 'Digicel Bermuda'},
'14417':{'en': 'Cellular One'},
'14418':{'en': 'Cellular One'},
'1473402':{'en': 'Affordable Island Communications'},
'147341':{'en': 'Digicel Grenada'},
'147342':{'en': 'Digicel Grenada'},
'147352':{'en': 'Affordable Island Communications'},
'147353':{'en': 'AWS Grenada'},
'147390':{'en': 'Affordable Island Communications'},
'164923':{'en': 'C&W'},
'164924':{'en': 'Cable & Wireless'},
'16493':{'en': 'Digicel'},
'164943':{'en': 'Islandcom'},
'1658295':{'en': 'Cable & Wireless'},
'1659200':{'en': 'Onvoy'},
'1659222':{'en': 'Onvoy'},
'1659300':{'en': 'Onvoy'},
'1659400':{'en': 'Onvoy'},
'1659444':{'en': 'Onvoy'},
'1659500':{'en': 'Onvoy'},
'1659529':{'en': 'Fractel'},
'1659600':{'en': 'Onvoy'},
'1659666':{'en': 'Onvoy'},
'1659766':{'en': 'Fractel'},
'1659777':{'en': 'Onvoy'},
'1659800':{'en': 'Onvoy'},
'1659888':{'en': 'Fractel'},
'1659900':{'en': 'Onvoy'},
'1659999':{'en': 'Onvoy'},
'166434':{'en': 'Cable & Wireless'},
'166439':{'en': 'Digicel'},
'1670284':{'en': 'PTI PACIFICA'},
'167148':{'en': 'GTA'},
'167174':{'en': 'PTI PACIFICA'},
'167183':{'en': 'i CAN_GSM'},
'167184':{'en': 'i CAN_GSM'},
'167185':{'en': 'i CAN_GSM'},
'1671864':{'en': 'GTA'},
'1671868':{'en': 'Choice Phone'},
'167187':{'en': 'Choice Phone'},
'167188':{'en': 'Choice Phone'},
'167189':{'en': 'Choice Phone'},
'168424':{'en': 'ASTCA'},
'168425':{'en': 'Blue Sky'},
'168427':{'en': 'Blue Sky'},
'16847':{'en': 'ASTCA'},
'175828':{'en': 'Cable & Wireless'},
'17583':{'en': 'Cable & Wireless'},
'1758460':{'en': 'Cable & Wireless'},
'1758461':{'en': 'Cable & Wireless'},
'1758484':{'en': 'Cable & Wireless'},
'1758485':{'en': 'Cable & Wireless'},
'1758486':{'en': 'Cable & Wireless'},
'1758487':{'en': 'Cable & Wireless'},
'1758488':{'en': 'Cable & Wireless'},
'1758489':{'en': 'Cable & Wireless'},
'175851':{'en': 'Digicel'},
'175852':{'en': 'Digicel'},
'175858':{'en': 'Cable & Wireless'},
'175871':{'en': 'Digicel'},
'175872':{'en': 'Digicel'},
'175873':{'en': 'Digicel'},
'17588':{'en': 'Digicel'},
'176722':{'en': 'Cable & Wireless'},
'176723':{'en': 'Cable & Wireless'},
'176724':{'en': 'Cable & Wireless'},
'1767265':{'en': 'Cable & Wireless'},
'176727':{'en': 'Cable & Wireless'},
'176728':{'en': 'Cable & Wireless'},
'176729':{'en': 'Cable & Wireless'},
'17673':{'en': 'Digicel'},
'17676':{'en': 'Digicel'},
'1767704':{'en': 'Digicel'},
'1767705':{'en': 'Digicel'},
'1767706':{'en': 'Digicel'},
'1784430':{'en': 'AT&T'},
'1784431':{'en': 'AT&T'},
'1784432':{'en': 'AT&T'},
'1784433':{'en': 'Digicel'},
'1784434':{'en': 'Digicel'},
'1784435':{'en': 'Digicel'},
'1784454':{'en': 'Cable & Wireless'},
'1784455':{'en': 'Cable & Wireless'},
'1784489':{'en': 'Cable & Wireless'},
'1784490':{'en': 'Cable & Wireless'},
'1784491':{'en': 'Cable & Wireless'},
'1784492':{'en': 'Cable & Wireless'},
'1784493':{'en': 'Cable & Wireless'},
'1784494':{'en': 'Cable & Wireless'},
'1784495':{'en': 'Cable & Wireless'},
'178452':{'en': 'Digicel'},
'178453':{'en': 'Digicel'},
'178472':{'en': 'Digicel'},
'1787203':{'en': 'Claro'},
'1787210':{'en': 'SunCom Wireless Puerto Rico'},
'1787212':{'en': 'Claro'},
'1787213':{'en': 'Claro'},
'1787214':{'en': 'Claro'},
'1787215':{'en': 'Claro'},
'1787216':{'en': 'Claro'},
'1787217':{'en': 'Claro'},
'1787218':{'en': 'Claro'},
'1787219':{'en': 'Claro'},
'1787220':{'en': 'CENTENNIAL'},
'1787221':{'en': 'CENTENNIAL'},
'1787222':{'en': 'CENTENNIAL'},
'1787223':{'en': 'CENTENNIAL'},
'1787224':{'en': 'CENTENNIAL'},
'1787225':{'en': 'SunCom Wireless Puerto Rico'},
'1787226':{'en': 'SunCom Wireless Puerto Rico'},
'1787227':{'en': 'CENTENNIAL'},
'1787229':{'en': 'CENTENNIAL'},
'1787253':{'en': 'Claro'},
'1787254':{'en': 'Claro'},
'1787255':{'en': 'Claro'},
'1787256':{'en': 'Claro'},
'1787257':{'en': 'Claro'},
'1787258':{'en': 'Claro'},
'1787259':{'en': 'Claro'},
'1787260':{'en': 'Claro'},
'1787291':{'en': 'CENTENNIAL'},
'1787299':{'en': 'SunCom Wireless Puerto Rico'},
'1787300':{'en': 'CENTENNIAL'},
'1787310':{'en': 'SunCom Wireless Puerto Rico'},
'1787312':{'en': 'Claro'},
'1787313':{'en': 'Claro'},
'1787314':{'en': 'Claro'},
'1787315':{'en': 'Claro'},
'1787316':{'en': 'Claro'},
'1787317':{'en': 'Claro'},
'1787318':{'en': 'Claro'},
'17873191':{'en': 'Claro'},
'17873192':{'en': 'Claro'},
'17873193':{'en': 'Claro'},
'17873194':{'en': 'Claro'},
'17873195':{'en': 'Claro'},
'17873196':{'en': 'Claro'},
'17873197':{'en': 'Claro'},
'17873198':{'en': 'Claro'},
'17873199':{'en': 'Claro'},
'1787341':{'en': 'SunCom Wireless Puerto Rico'},
'1787344':{'en': 'SunCom Wireless Puerto Rico'},
'1787346':{'en': 'SunCom Wireless Puerto Rico'},
'1787355':{'en': 'CENTENNIAL'},
'1787357':{'en': 'CENTENNIAL'},
'1787359':{'en': 'SunCom Wireless Puerto Rico'},
'1787367':{'en': 'SunCom Wireless Puerto Rico'},
'1787368':{'en': 'SunCom Wireless Puerto Rico'},
'1787369':{'en': 'CENTENNIAL'},
'1787371':{'en': 'Claro'},
'1787372':{'en': 'Claro'},
'1787374':{'en': 'Claro'},
'1787375':{'en': 'Claro'},
'1787376':{'en': 'Claro'},
'1787380':{'en': 'Claro'},
'1787381':{'en': 'Claro'},
'1787382':{'en': 'Claro'},
'1787383':{'en': 'Claro'},
'1787384':{'en': 'Claro'},
'1787385':{'en': 'Claro'},
'1787389':{'en': 'Claro'},
'1787390':{'en': 'Claro'},
'1787391':{'en': 'Claro'},
'1787392':{'en': 'Claro'},
'1787400':{'en': 'CENTENNIAL'},
'1787410':{'en': 'SunCom Wireless Puerto Rico'},
'1787434':{'en': 'CENTENNIAL'},
'1787447':{'en': 'CENTENNIAL'},
'1787448':{'en': 'CENTENNIAL'},
'1787449':{'en': 'CENTENNIAL'},
'1787450':{'en': 'Claro'},
'1787453':{'en': 'Claro'},
'1787454':{'en': 'SunCom Wireless Puerto Rico'},
'1787458':{'en': 'SunCom Wireless Puerto Rico'},
'1787459':{'en': 'SunCom Wireless Puerto Rico'},
'1787460':{'en': 'SunCom Wireless Puerto Rico'},
'1787462':{'en': 'SunCom Wireless Puerto Rico'},
'1787463':{'en': 'SunCom Wireless Puerto Rico'},
'1787465':{'en': 'CENTENNIAL'},
'1787466':{'en': 'SunCom Wireless Puerto Rico'},
'1787471':{'en': 'CENTENNIAL'},
'1787473':{'en': 'CENTENNIAL'},
'1787474':{'en': 'CENTENNIAL'},
'1787478':{'en': 'SunCom Wireless Puerto Rico'},
'1787479':{'en': 'CENTENNIAL'},
'1787481':{'en': 'Claro'},
'1787484':{'en': 'Claro'},
'1787485':{'en': 'Claro'},
'1787486':{'en': 'Claro'},
'1787487':{'en': 'Claro'},
'1787513':{'en': 'SunCom Wireless Puerto Rico'},
'1787514':{'en': 'Claro'},
'1787515':{'en': 'Claro'},
'1787516':{'en': 'Claro'},
'1787517':{'en': 'Claro'},
'1787518':{'en': 'Claro'},
'1787519':{'en': 'Claro'},
'1787520':{'en': 'CENTENNIAL'},
'1787521':{'en': 'CENTENNIAL'},
'1787522':{'en': 'CENTENNIAL'},
'1787523':{'en': 'CENTENNIAL'},
'1787528':{'en': 'SunCom Wireless Puerto Rico'},
'1787534':{'en': 'CENTENNIAL'},
'1787535':{'en': 'CENTENNIAL'},
'1787537':{'en': 'CENTENNIAL'},
'1787544':{'en': 'CENTENNIAL'},
'1787545':{'en': 'CENTENNIAL'},
'1787546':{'en': 'SunCom Wireless Puerto Rico'},
'1787551':{'en': 'CENTENNIAL'},
'1787553':{'en': 'Claro'},
'1787561':{'en': 'CENTENNIAL'},
'1787563':{'en': 'CENTENNIAL'},
'1787568':{'en': 'SunCom Wireless Puerto Rico'},
'1787569':{'en': 'CENTENNIAL'},
'1787579':{'en': 'Claro'},
'1787580':{'en': 'CENTENNIAL'},
'1787585':{'en': 'CENTENNIAL'},
'1787588':{'en': 'CENTENNIAL'},
'1787589':{'en': 'CENTENNIAL'},
'1787595':{'en': 'SunCom Wireless Puerto Rico'},
'1787597':{'en': 'SunCom Wireless Puerto Rico'},
'1787598':{'en': 'SunCom Wireless Puerto Rico'},
'1787601':{'en': 'SunCom Wireless Puerto Rico'},
'1787602':{'en': 'CENTENNIAL'},
'1787604':{'en': 'SunCom Wireless Puerto Rico'},
'1787605':{'en': 'SunCom Wireless Puerto Rico'},
'1787607':{'en': 'CENTENNIAL'},
'1787608':{'en': 'CENTENNIAL'},
'1787609':{'en': 'CENTENNIAL'},
'1787612':{'en': 'Claro'},
'1787613':{'en': 'Claro'},
'1787614':{'en': 'Claro'},
'1787615':{'en': 'Claro'},
'1787616':{'en': 'Claro'},
'1787617':{'en': 'Claro'},
'1787619':{'en': 'SunCom Wireless Puerto Rico'},
'1787620':{'en': 'CENTENNIAL'},
'1787621':{'en': 'CENTENNIAL'},
'1787622':{'en': 'CENTENNIAL'},
'1787623':{'en': 'CENTENNIAL'},
'1787624':{'en': 'CENTENNIAL'},
'1787625':{'en': 'CENTENNIAL'},
'1787626':{'en': 'CENTENNIAL'},
'1787628':{'en': 'CENTENNIAL'},
'1787629':{'en': 'SunCom Wireless Puerto Rico'},
'178764':{'en': 'CENTENNIAL'},
'178765':{'en': 'CENTENNIAL'},
'1787662':{'en': 'SunCom Wireless Puerto Rico'},
'1787666':{'en': 'SunCom Wireless Puerto Rico'},
'1787673':{'en': 'SunCom Wireless Puerto Rico'},
'1787675':{'en': 'CENTENNIAL'},
'1787678':{'en': 'SunCom Wireless Puerto Rico'},
'1787686':{'en': 'CENTENNIAL'},
'1787687':{'en': 'CENTENNIAL'},
'1787689':{'en': 'CENTENNIAL'},
'1787690':{'en': 'CENTENNIAL'},
'1787692':{'en': 'CENTENNIAL'},
'1787693':{'en': 'CENTENNIAL'},
'1787695':{'en': 'CENTENNIAL'},
'1787717':{'en': 'CENTENNIAL'},
'1787719':{'en': 'CENTENNIAL'},
'1787901':{'en': 'SunCom Wireless Puerto Rico'},
'1787903':{'en': 'CENTENNIAL'},
'1787904':{'en': 'SunCom Wireless Puerto Rico'},
'1787908':{'en': 'CENTENNIAL'},
'1787912':{'en': 'CENTENNIAL'},
'1787915':{'en': 'CENTENNIAL'},
'1787916':{'en': 'CENTENNIAL'},
'1787917':{'en': 'CENTENNIAL'},
'1787922':{'en': 'SunCom Wireless Puerto Rico'},
'1787923':{'en': 'SunCom Wireless Puerto Rico'},
'1787924':{'en': 'CENTENNIAL'},
'1787926':{'en': 'CENTENNIAL'},
'1787927':{'en': 'CENTENNIAL'},
'1787928':{'en': 'CENTENNIAL'},
'1787933':{'en': 'CENTENNIAL'},
'1787935':{'en': 'CENTENNIAL'},
'1787937':{'en': 'CENTENNIAL'},
'1787940':{'en': 'CENTENNIAL'},
'1787947':{'en': 'CENTENNIAL'},
'1787949':{'en': 'SunCom Wireless Puerto Rico'},
'1787952':{'en': 'CENTENNIAL'},
'1787953':{'en': 'CENTENNIAL'},
'1787954':{'en': 'CENTENNIAL'},
'1787957':{'en': 'CENTENNIAL'},
'1787961':{'en': 'CENTENNIAL'},
'1787968':{'en': 'CENTENNIAL'},
'1787969':{'en': 'CENTENNIAL'},
'1787971':{'en': 'CENTENNIAL'},
'1787975':{'en': 'CENTENNIAL'},
'1787978':{'en': 'CENTENNIAL'},
'1787992':{'en': 'CENTENNIAL'},
'1787993':{'en': 'CENTENNIAL'},
'1787998':{'en': 'CENTENNIAL'},
'1787999':{'en': 'CENTENNIAL'},
'180920':{'en': 'Tricom'},
'180922':{'en': 'Claro'},
'180923':{'en': 'Claro'},
'180924':{'en': 'Claro'},
'180925':{'en': 'Claro'},
'180926':{'en': 'Claro'},
'180927':{'en': 'Claro'},
'180928':{'en': 'Claro'},
'180929':{'en': 'Tricom'},
'18093':{'en': 'Claro'},
'180930':{'en': 'Viva'},
'180931':{'en': 'Tricom'},
'180932':{'en': 'Tricom'},
'180934':{'en': 'Tricom'},
'180941':{'en': 'Viva'},
'180942':{'en': 'Claro'},
'180943':{'en': 'Viva'},
'180944':{'en': 'Viva'},
'180945':{'en': 'Claro'},
'180947':{'en': 'Tricom'},
'180948':{'en': 'Claro'},
'180949':{'en': 'Claro'},
'180951':{'en': 'Claro'},
'180954':{'en': 'Claro'},
'180960':{'en': 'Claro'},
'180962':{'en': 'Tricom'},
'180963':{'en': 'Tricom'},
'180964':{'en': 'Tricom'},
'180965':{'en': 'Tricom'},
'180967':{'en': 'Claro'},
'180969':{'en': 'Claro'},
'180970':{'en': 'Claro'},
'180971':{'en': 'Claro'},
'180972':{'en': 'Claro'},
'180974':{'en': 'Claro'},
'180975':{'en': 'Claro'},
'180976':{'en': 'Claro'},
'180977':{'en': 'Viva'},
'180978':{'en': 'Claro'},
'180979':{'en': 'Claro'},
'18098':{'en': 'Orange'},
'180981':{'en': 'Viva'},
'180982':{'en': 'Claro'},
'180983':{'en': 'Claro'},
'180987':{'en': 'Tricom'},
'180991':{'en': 'Orange'},
'180992':{'en': 'Tricom'},
'180993':{'en': 'Tricom'},
'180994':{'en': 'Tricom'},
'180995':{'en': 'Claro'},
'180997':{'en': 'Orange'},
'180998':{'en': 'Orange'},
'180999':{'en': 'Tricom'},
'1868263':{'en': 'Digicel'},
'1868264':{'en': 'Digicel'},
'1868265':{'en': 'Digicel'},
'1868266':{'en': 'bmobile'},
'1868267':{'en': 'bmobile'},
'1868268':{'en': 'bmobile'},
'1868269':{'en': 'bmobile'},
'186827':{'en': 'bmobile'},
'186828':{'en': 'bmobile'},
'186829':{'en': 'bmobile'},
'18683':{'en': 'Digicel'},
'18684':{'en': 'bmobile'},
'1868620':{'en': 'bmobile'},
'1868678':{'en': 'bmobile'},
'186868':{'en': 'bmobile'},
'18687':{'en': 'bmobile'},
'186948':{'en': 'Cable & Wireless'},
'186955':{'en': 'CariGlobe St. Kitts'},
'186956':{'en': 'The Cable St. Kitts'},
'1869660':{'en': 'Cable & Wireless'},
'1869661':{'en': 'Cable & Wireless'},
'1869662':{'en': 'Cable & Wireless'},
'1869663':{'en': 'Cable & Wireless'},
'1869664':{'en': 'Cable & Wireless'},
'1869665':{'en': 'Cable & Wireless'},
'1869667':{'en': 'Cable & Wireless'},
'1869668':{'en': 'Cable & Wireless'},
'1869669':{'en': 'Cable & Wireless'},
'1869760':{'en': 'Digicel'},
'1869762':{'en': 'Digicel'},
'1869763':{'en': 'Digicel'},
'1869764':{'en': 'Digicel'},
'1869765':{'en': 'Digicel'},
'1869766':{'en': 'Digicel'},
'1876210':{'en': 'Cable & Wireless'},
'187622':{'en': 'Cable & Wireless'},
'187623':{'en': 'Cable & Wireless'},
'187624':{'en': 'Digicel'},
'187625':{'en': 'Digicel'},
'187626':{'en': 'Digicel'},
'1876275':{'en': 'Digicel'},
'1876276':{'en': 'Digicel'},
'1876277':{'en': 'Digicel'},
'1876278':{'en': 'Digicel'},
'1876279':{'en': 'Digicel'},
'187628':{'en': 'Digicel'},
'187629':{'en': 'Digicel'},
'187630':{'en': 'Digicel'},
'1876310':{'en': 'Cable & Wireless'},
'1876312':{'en': 'Cable & Wireless'},
'1876313':{'en': 'Cable & Wireless'},
'1876314':{'en': 'Cable & Wireless'},
'1876315':{'en': 'Cable & Wireless'},
'1876316':{'en': 'Cable & Wireless'},
'1876317':{'en': 'Cable & Wireless'},
'1876318':{'en': 'Cable & Wireless'},
'1876319':{'en': 'Cable & Wireless'},
'187632':{'en': 'Cable & Wireless'},
'187633':{'en': 'Cable & Wireless'},
'187634':{'en': 'Cable & Wireless'},
'187635':{'en': 'Digicel'},
'187636':{'en': 'Digicel'},
'187637':{'en': 'Digicel'},
'187638':{'en': 'Digicel'},
'187639':{'en': 'Digicel'},
'187640':{'en': 'Digicel'},
'187641':{'en': 'Digicel'},
'187642':{'en': 'Digicel'},
'187643':{'en': 'Digicel'},
'1876440':{'en': 'Digicel'},
'1876441':{'en': 'Digicel'},
'1876442':{'en': 'Digicel'},
'1876443':{'en': 'Digicel'},
'1876445':{'en': 'Digicel'},
'1876446':{'en': 'Digicel'},
'1876447':{'en': 'Digicel'},
'1876448':{'en': 'Digicel'},
'1876449':{'en': 'Digicel'},
'187645':{'en': 'Digicel'},
'187646':{'en': 'Digicel'},
'187647':{'en': 'Digicel'},
'187648':{'en': 'Digicel'},
'187649':{'en': 'Digicel'},
'1876501':{'en': 'Cable & Wireless'},
'1876503':{'en': 'Digicel'},
'1876504':{'en': 'Digicel'},
'1876505':{'en': 'Digicel'},
'1876506':{'en': 'Digicel'},
'1876507':{'en': 'Digicel'},
'1876508':{'en': 'Digicel'},
'1876509':{'en': 'Digicel'},
'1876515':{'en': 'Cable & Wireless'},
'1876517':{'en': 'Cable & Wireless'},
'1876519':{'en': 'Cable & Wireless'},
'187652':{'en': 'Digicel'},
'187653':{'en': 'Cable & Wireless'},
'187654':{'en': 'Cable & Wireless'},
'1876550':{'en': 'Digicel'},
'1876551':{'en': 'Digicel'},
'1876552':{'en': 'Digicel'},
'1876553':{'en': 'Digicel'},
'1876554':{'en': 'Digicel'},
'1876556':{'en': 'Digicel'},
'1876557':{'en': 'Digicel'},
'1876558':{'en': 'Digicel'},
'1876559':{'en': 'Digicel'},
'1876560':{'en': 'Digicel'},
'1876561':{'en': 'Digicel'},
'1876562':{'en': 'Digicel'},
'1876564':{'en': 'Digicel'},
'1876565':{'en': 'Digicel'},
'1876566':{'en': 'Digicel'},
'1876567':{'en': 'Digicel'},
'1876568':{'en': 'Digicel'},
'1876569':{'en': 'Digicel'},
'187657':{'en': 'Digicel'},
'187658':{'en': 'Digicel'},
'187659':{'en': 'Digicel'},
'1876648':{'en': 'Digicel'},
'1876649':{'en': 'Digicel'},
'1876666':{'en': 'Digicel'},
'1876667':{'en': 'Digicel'},
'1876700':{'en': 'Cable & Wireless'},
'1876707':{'en': 'Cable & Wireless'},
'187677':{'en': 'Cable & Wireless'},
'1876781':{'en': 'Cable & Wireless'},
'1876782':{'en': 'Cable & Wireless'},
'1876783':{'en': 'Cable & Wireless'},
'1876784':{'en': 'Cable & Wireless'},
'1876787':{'en': 'Cable & Wireless'},
'1876788':{'en': 'Cable & Wireless'},
'1876789':{'en': 'Cable & Wireless'},
'1876790':{'en': 'Cable & Wireless'},
'1876791':{'en': 'Cable & Wireless'},
'1876792':{'en': 'Cable & Wireless'},
'1876793':{'en': 'Cable & Wireless'},
'1876796':{'en': 'Cable & Wireless'},
'1876797':{'en': 'Cable & Wireless'},
'1876798':{'en': 'Cable & Wireless'},
'1876799':{'en': 'Cable & Wireless'},
'187680':{'en': 'Cable & Wireless'},
'1876810':{'en': 'Cable & Wireless'},
'1876812':{'en': 'Cable & Wireless'},
'1876813':{'en': 'Cable & Wireless'},
'1876814':{'en': 'Cable & Wireless'},
'1876815':{'en': 'Cable & Wireless'},
'1876816':{'en': 'Cable & Wireless'},
'1876817':{'en': 'Cable & Wireless'},
'1876818':{'en': 'Cable & Wireless'},
'1876819':{'en': 'Cable & Wireless'},
'187682':{'en': 'Cable & Wireless'},
'187683':{'en': 'Cable & Wireless'},
'187684':{'en': 'Digicel'},
'187685':{'en': 'Digicel'},
'187686':{'en': 'Digicel'},
'187687':{'en': 'Digicel'},
'187688':{'en': 'Digicel'},
'187689':{'en': 'Digicel'},
'1876909':{'en': 'Cable & Wireless'},
'1876919':{'en': 'Cable & Wireless'},
'1876990':{'en': 'Cable & Wireless'},
'1876995':{'en': 'Cable & Wireless'},
'1876997':{'en': 'Cable & Wireless'},
'1876999':{'en': 'Cable & Wireless'},
'1939201':{'en': 'CENTENNIAL'},
'1939212':{'en': 'CENTENNIAL'},
'1939214':{'en': 'CENTENNIAL'},
'1939240':{'en': 'SunCom Wireless Puerto Rico'},
'19392410':{'en': 'Claro'},
'19392411':{'en': 'Claro'},
'19392412':{'en': 'Claro'},
'19392413':{'en': 'Claro'},
'19392414':{'en': 'Claro'},
'19392415':{'en': 'Claro'},
'19392416':{'en': 'Claro'},
'193924199':{'en': 'Claro'},
'1939242':{'en': 'Claro'},
'19392433':{'en': 'Claro'},
'19392434':{'en': 'Claro'},
'19392435':{'en': 'Claro'},
'19392436':{'en': 'Claro'},
'19392437':{'en': 'Claro'},
'19392438':{'en': 'Claro'},
'19392439':{'en': 'Claro'},
'1939244':{'en': 'Claro'},
'1939245':{'en': 'Claro'},
'1939246':{'en': 'Claro'},
'1939247':{'en': 'Claro'},
'1939248':{'en': 'Claro'},
'1939249':{'en': 'Claro'},
'193925':{'en': 'Claro'},
'1939252':{'en': 'CENTENNIAL'},
'1939307':{'en': 'CENTENNIAL'},
'1939325':{'en': 'SunCom Wireless Puerto Rico'},
'1939329':{'en': 'CENTENNIAL'},
'1939334':{'en': 'Claro'},
'1939339':{'en': 'SunCom Wireless Puerto Rico'},
'1939394':{'en': 'CENTENNIAL'},
'1939440':{'en': 'CENTENNIAL'},
'1939628':{'en': 'CENTENNIAL'},
'1939630':{'en': 'CENTENNIAL'},
'1939639':{'en': 'CENTENNIAL'},
'1939640':{'en': 'CENTENNIAL'},
'1939642':{'en': 'CENTENNIAL'},
'1939644':{'en': 'CENTENNIAL'},
'1939645':{'en': 'CENTENNIAL'},
'1939697':{'en': 'CENTENNIAL'},
'1939717':{'en': 'CENTENNIAL'},
'1939731':{'en': 'CENTENNIAL'},
'1939777':{'en': 'Claro'},
'1939865':{'en': 'SunCom Wireless Puerto Rico'},
'1939891':{'en': 'SunCom Wireless Puerto Rico'},
'1939910':{'en': 'CENTENNIAL'},
'1939940':{'en': 'CENTENNIAL'},
'1939969':{'en': 'CENTENNIAL'},
'2010':{'en': 'Vodafone'},
'2011':{'en': 'Etisalat'},
'2012':{'en': 'Orange'},
'2015':{'en': 'TE'},
'21112':{'en': 'Sudatel Group'},
'21191':{'en': 'Zain'},
'21192':{'en': 'MTN'},
'21195':{'en': 'Network of the World'},
'21197':{'en': 'Gemtel'},
'21199':{'en': 'MTN'},
'21260':{'en': 'Inwi'},
'21261':{'en': 'Maroc Telecom'},
'212612':{'en': u('M\u00e9ditel')},
'212614':{'en': u('M\u00e9ditel')},
'212617':{'en': u('M\u00e9ditel')},
'212619':{'en': u('M\u00e9ditel')},
'212620':{'en': u('M\u00e9ditel')},
'212621':{'en': u('M\u00e9ditel')},
'212622':{'en': 'Maroc Telecom'},
'212623':{'en': 'Maroc Telecom'},
'212624':{'en': 'Maroc Telecom'},
'212625':{'en': u('M\u00e9ditel')},
'212626':{'en': 'Inwi'},
'212627':{'en': 'Inwi'},
'212628':{'en': 'Maroc Telecom'},
'212629':{'en': 'Inwi'},
'212630':{'en': 'Inwi'},
'212631':{'en': u('M\u00e9ditel')},
'212632':{'en': u('M\u00e9ditel')},
'212633':{'en': 'Inwi'},
'212634':{'en': 'Inwi'},
'212635':{'en': 'Inwi'},
'212636':{'en': 'Maroc Telecom'},
'212637':{'en': 'Maroc Telecom'},
'212638':{'en': 'Inwi'},
'212639':{'en': 'Maroc Telecom'},
'212640':{'en': 'Inwi'},
'212641':{'en': 'Maroc Telecom'},
'212642':{'en': 'Maroc Telecom'},
'212643':{'en': 'Maroc Telecom'},
'212644':{'en': u('M\u00e9ditel')},
'212645':{'en': u('M\u00e9ditel')},
'212646':{'en': 'Inwi'},
'212647':{'en': 'Inwi'},
'212648':{'en': 'Maroc Telecom'},
'212649':{'en': u('M\u00e9ditel')},
'21265':{'en': 'Maroc Telecom'},
'212656':{'en': u('M\u00e9ditel')},
'212657':{'en': u('M\u00e9ditel')},
'212660':{'en': u('M\u00e9ditel')},
'212661':{'en': 'Maroc Telecom'},
'212662':{'en': 'Maroc Telecom'},
'212663':{'en': u('M\u00e9ditel')},
'212664':{'en': u('M\u00e9ditel')},
'212665':{'en': u('M\u00e9ditel')},
'212666':{'en': 'Maroc Telecom'},
'212667':{'en': 'Maroc Telecom'},
'212668':{'en': 'Maroc Telecom'},
'212669':{'en': u('M\u00e9ditel')},
'21267':{'en': 'Maroc Telecom'},
'212674':{'en': u('M\u00e9ditel')},
'212675':{'en': u('M\u00e9ditel')},
'212679':{'en': u('M\u00e9ditel')},
'212680':{'en': 'Inwi'},
'212681':{'en': 'Inwi'},
'212682':{'en': 'Maroc Telecom'},
'212684':{'en': u('M\u00e9ditel')},
'212687':{'en': 'Inwi'},
'212688':{'en': u('M\u00e9ditel')},
'212689':{'en': 'Maroc Telecom'},
'212690':{'en': 'Inwi'},
'212691':{'en': u('M\u00e9ditel')},
'2126921':{'en': 'Al Hourria Telecom'},
'2126922':{'en': 'Al Hourria Telecom'},
'212693':{'en': u('M\u00e9ditel')},
'212694':{'en': u('M\u00e9ditel')},
'212695':{'en': 'Inwi'},
'212696':{'en': 'Maroc Telecom'},
'212697':{'en': 'Maroc Telecom'},
'212698':{'en': 'Inwi'},
'212699':{'en': 'Inwi'},
'212700':{'en': 'Inwi'},
'212706':{'en': 'Inwi'},
'212707':{'en': 'Inwi'},
'212708':{'en': 'Inwi'},
'21276':{'en': 'Maroc Telecom'},
'21277':{'en': u('M\u00e9ditel')},
'2135':{'en': 'Ooredoo'},
'2136':{'en': 'Mobilis'},
'2137':{'en': 'Djezzy'},
'2162':{'en': 'Ooredoo'},
'21640':{'en': 'Tunisie Telecom'},
'21641':{'en': 'Tunisie Telecom'},
'21642':{'en': 'Tunisie Telecom'},
'21643':{'en': 'Lyca Mobile'},
'21644':{'en': 'Tunisie Telecom'},
'21645':{'en': 'Watany Ettisalat'},
'21646':{'en': 'Ooredoo'},
'21647':{'en': 'Tunisie Telecom'},
'2165':{'en': 'Orange'},
'2169':{'en': 'Tunisie Telecom'},
'21891':{'en': 'Al-Madar'},
'21892':{'en': 'Libyana'},
'21893':{'en': 'Al-Madar'},
'21894':{'en': 'Libyana'},
'21895':{'en': 'Libya Telecom & Technology'},
'21896':{'en': 'Libya Telecom & Technology'},
'2202':{'en': 'Africell'},
'2203':{'en': 'QCell'},
'22050':{'en': 'QCell'},
'22051':{'en': 'QCell'},
'22052':{'en': 'QCell'},
'22053':{'en': 'QCell'},
'22058':{'en': 'QCell'},
'22059':{'en': 'QCell'},
'2206':{'en': 'Comium'},
'2207':{'en': 'Africell'},
'2209':{'en': 'Gamcel'},
'22170':{'en': 'Expresso'},
'22172':{'en': 'HAYO'},
'22176':{'en': 'Tigo'},
'22177':{'en': 'Orange'},
'22178':{'en': 'Orange'},
'22179':{'en': 'ADIE'},
'22220':{'en': 'Chinguitel'},
'22221':{'en': 'Chinguitel'},
'22222':{'en': 'Chinguitel'},
'22223':{'en': 'Chinguitel'},
'22224':{'en': 'Chinguitel'},
'22226':{'en': 'Chinguitel'},
'22227':{'en': 'Chinguitel'},
'22228':{'en': 'Chinguitel'},
'22229':{'en': 'Chinguitel'},
'22230':{'en': 'Mattel'},
'22231':{'en': 'Mattel'},
'22232':{'en': 'Mattel'},
'22233':{'en': 'Mattel'},
'22234':{'en': 'Mattel'},
'22236':{'en': 'Mattel'},
'22237':{'en': 'Mattel'},
'22238':{'en': 'Mattel'},
'22239':{'en': 'Mattel'},
'22240':{'en': 'Mauritel'},
'22241':{'en': 'Mauritel'},
'22242':{'en': 'Mauritel'},
'22243':{'en': 'Mauritel'},
'22244':{'en': 'Mauritel'},
'22246':{'en': 'Mauritel'},
'22247':{'en': 'Mauritel'},
'22248':{'en': 'Mauritel'},
'22249':{'en': 'Mauritel'},
'223200':{'en': 'Orange'},
'2232079':{'en': 'Sotelma'},
'223217':{'en': 'Sotelma'},
'2235':{'en': 'Atel'},
'2236':{'en': 'Sotelma'},
'2237':{'en': 'Orange'},
'22382':{'en': 'Orange'},
'22383':{'en': 'Orange'},
'22389':{'en': 'Sotelma'},
'22390':{'en': 'Orange'},
'22391':{'en': 'Orange'},
'22392':{'en': 'Orange'},
'22393':{'en': 'Orange'},
'22394':{'en': 'Orange'},
'22395':{'en': 'Sotelma'},
'22396':{'en': 'Sotelma'},
'22397':{'en': 'Sotelma'},
'22398':{'en': 'Sotelma'},
'22399':{'en': 'Sotelma'},
'22460':{'en': 'Sotelgui'},
'22462':{'en': 'Orange'},
'22463':{'en': 'Intercel'},
'22465':{'en': 'Cellcom'},
'22466':{'en': 'Areeba'},
'22501':{'en': 'Moov'},
'22502':{'en': 'Moov'},
'22503':{'en': 'Moov'},
'22504':{'en': 'MTN'},
'22505':{'en': 'MTN'},
'22506':{'en': 'MTN'},
'22507':{'en': 'Orange'},
'22508':{'en': 'Orange'},
'22509':{'en': 'Orange'},
'225208':{'en': 'Moov'},
'225218':{'en': 'Moov'},
'225228':{'en': 'Moov'},
'225238':{'en': 'Moov'},
'22540':{'en': 'Moov'},
'22541':{'en': 'Moov'},
'22542':{'en': 'Moov'},
'22543':{'en': 'Moov'},
'22544':{'en': 'MTN'},
'22545':{'en': 'MTN'},
'22546':{'en': 'MTN'},
'22547':{'en': 'Orange'},
'22548':{'en': 'Orange'},
'22549':{'en': 'Orange'},
'22550':{'en': 'Moov'},
'22551':{'en': 'Moov'},
'22552':{'en': 'Moov'},
'22553':{'en': 'Moov'},
'22554':{'en': 'MTN'},
'22555':{'en': 'MTN'},
'22556':{'en': 'MTN'},
'22557':{'en': 'Orange'},
'22558':{'en': 'Orange'},
'22559':{'en': 'Orange'},
'22560':{'en': 'GreenN'},
'22561':{'en': 'GreenN'},
'22564':{'en': 'MTN'},
'22565':{'en': 'MTN'},
'22566':{'en': 'MTN'},
'22567':{'en': 'Orange'},
'22568':{'en': 'Orange'},
'22569':{'en': 'Aircom'},
'22570':{'en': 'Moov'},
'22571':{'en': 'Moov'},
'22572':{'en': 'Moov'},
'22573':{'en': 'Moov'},
'22574':{'en': 'MTN'},
'22575':{'en': 'MTN'},
'22576':{'en': 'MTN'},
'22577':{'en': 'Orange'},
'22578':{'en': 'Orange'},
'22579':{'en': 'Orange'},
'22584':{'en': 'MTN'},
'22585':{'en': 'MTN'},
'22586':{'en': 'MTN'},
'22587':{'en': 'Orange'},
'22588':{'en': 'Orange'},
'22589':{'en': 'Orange'},
'22595':{'en': 'MTN'},
'22597':{'en': 'Orange'},
'22601':{'en': 'Onatel'},
'22602':{'en': 'Onatel'},
'22607':{'en': 'Orange'},
'22651':{'en': 'Telmob'},
'22652':{'en': 'Telmob'},
'22653':{'en': 'Onatel'},
'22654':{'en': 'Orange'},
'22655':{'en': 'Orange'},
'22656':{'en': 'Orange'},
'22657':{'en': 'Orange'},
'22658':{'en': 'Telecel Faso'},
'22660':{'en': 'Telmob'},
'22661':{'en': 'Telmob'},
'22662':{'en': 'Telmob'},
'22663':{'en': 'Telmob'},
'22664':{'en': 'Orange'},
'22665':{'en': 'Orange'},
'22666':{'en': 'Orange'},
'22667':{'en': 'Orange'},
'22668':{'en': 'Telecel Faso'},
'22669':{'en': 'Telecel Faso'},
'22670':{'en': 'Telmob'},
'22671':{'en': 'Telmob'},
'22672':{'en': 'Telmob'},
'22673':{'en': 'Telmob'},
'22674':{'en': 'Orange'},
'22675':{'en': 'Orange'},
'22676':{'en': 'Orange'},
'22677':{'en': 'Orange'},
'22678':{'en': 'Telecel Faso'},
'22679':{'en': 'Telecel Faso'},
'22723':{'en': 'Orange'},
'22780':{'en': 'Orange'},
'22781':{'en': 'Orange'},
'22788':{'en': 'Airtel'},
'22789':{'en': 'Airtel'},
'22790':{'en': 'Orange'},
'22791':{'en': 'Orange'},
'22792':{'en': 'Orange'},
'22793':{'en': 'SahelCom'},
'22794':{'en': 'Moov'},
'22795':{'en': 'Moov'},
'22796':{'en': 'Airtel'},
'22797':{'en': 'Airtel'},
'22798':{'en': 'Airtel'},
'22799':{'en': 'Airtel'},
'22870':{'en': 'TOGOCEL'},
'22879':{'en': 'Moov'},
'22890':{'en': 'TOGOCEL'},
'22891':{'en': 'TOGOCEL'},
'22892':{'en': 'TOGOCEL'},
'22893':{'en': 'TOGOCEL'},
'22896':{'en': 'Moov'},
'22897':{'en': 'TOGOCEL'},
'22898':{'en': 'Moov'},
'22899':{'en': 'Moov'},
'2295':{'en': 'MTN'},
'22960':{'en': 'Moov'},
'22961':{'en': 'MTN'},
'22962':{'en': 'MTN'},
'22963':{'en': 'Moov'},
'22964':{'en': 'Moov'},
'22965':{'en': 'Moov'},
'22966':{'en': 'MTN'},
'22967':{'en': 'MTN'},
'22968':{'en': 'Moov'},
'22969':{'en': 'MTN'},
'22990':{'en': 'Moov'},
'22991':{'en': 'Moov'},
'22993':{'en': 'BLK'},
'22994':{'en': 'Moov'},
'22995':{'en': 'Moov'},
'22997':{'en': 'MTN'},
'22998':{'en': 'Moov'},
'22999':{'en': 'Moov'},
'230525':{'en': 'Cellplus'},
'230528':{'en': 'MTML'},
'230529':{'en': 'MTML'},
'23054':{'en': 'Emtel'},
'2305471':{'en': 'Cellplus'},
'23057':{'en': 'Cellplus'},
'230571':{'en': 'Emtel'},
'230572':{'en': 'Emtel'},
'230573':{'en': 'Emtel'},
'230574':{'en': 'Emtel'},
'230580':{'en': 'Cellplus'},
'230581':{'en': 'Cellplus'},
'230582':{'en': 'Cellplus'},
'230583':{'en': 'Cellplus'},
'230584':{'en': 'Emtel'},
'230585':{'en': 'Emtel'},
'230586':{'en': 'MTML'},
'2305871':{'en': 'MTML'},
'2305875':{'en': 'Cellplus'},
'2305876':{'en': 'Cellplus'},
'2305877':{'en': 'Cellplus'},
'2305878':{'en': 'Cellplus'},
'230588':{'en': 'MTML'},
'230589':{'en': 'MTML'},
'230590':{'en': 'Cellplus'},
'230591':{'en': 'Cellplus'},
'230592':{'en': 'Cellplus'},
'230593':{'en': 'Emtel'},
'230594':{'en': 'Cellplus'},
'230595':{'en': 'MTML'},
'230596':{'en': 'MTML'},
'230597':{'en': 'Emtel'},
'230598':{'en': 'Emtel'},
'231330':{'en': 'West Africa Telecom'},
'231555':{'en': 'Lonestar Cell'},
'2316':{'en': 'Lonestar Cell'},
'2317':{'en': 'Orange'},
'2318':{'en': 'Lonestar Cell'},
'23225':{'en': 'Sierratel'},
'23230':{'en': 'Africell'},
'23231':{'en': 'QCELL'},
'23233':{'en': 'Africell'},
'23234':{'en': 'QCELL'},
'23235':{'en': 'IPTEL'},
'2326':{'en': 'Onlime'},
'23274':{'en': 'Orange'},
'23275':{'en': 'Orange'},
'23276':{'en': 'Orange'},
'23277':{'en': 'Africell'},
'23278':{'en': 'Orange'},
'23279':{'en': 'Orange'},
'2328':{'en': 'Africell'},
'2329':{'en': 'Africell'},
'23320':{'en': 'Vodafone'},
'23323':{'en': 'Globacom (Zain)'},
'23324':{'en': 'MTN'},
'23326':{'en': 'Airtel'},
'23327':{'en': 'tiGO'},
'23328':{'en': 'Expresso'},
'23350':{'en': 'Vodafone'},
'23354':{'en': 'MTN'},
'23355':{'en': 'MTN'},
'23356':{'en': 'Airtel'},
'23357':{'en': 'tiGO'},
'23359':{'en': 'MTN'},
'234701':{'en': 'Airtel'},
'2347020':{'en': 'Smile'},
'2347021':{'en': 'Ntel'},
'2347022':{'en': 'Ntel'},
'2347024':{'en': 'Prestel'},
'2347025':{'en': 'Visafone'},
'2347026':{'en': 'Visafone'},
'2347027':{'en': 'Multilinks'},
'2347028':{'en': 'Starcomms'},
'2347029':{'en': 'Starcomms'},
'234703':{'en': 'MTN'},
'234704':{'en': 'Visafone'},
'234705':{'en': 'Glo'},
'234706':{'en': 'MTN'},
'234708':{'en': 'Airtel'},
'234709':{'en': 'Multilinks'},
'234801':{'en': 'Megatech'},
'234802':{'en': 'Airtel'},
'234803':{'en': 'MTN'},
'234804':{'en': 'Ntel'},
'234805':{'en': 'Glo'},
'234806':{'en': 'MTN'},
'234807':{'en': 'Glo'},
'234808':{'en': 'Airtel'},
'234809':{'en': '9mobile'},
'234810':{'en': 'MTN'},
'234811':{'en': 'Glo'},
'234812':{'en': 'Airtel'},
'234813':{'en': 'MTN'},
'234814':{'en': 'MTN'},
'234815':{'en': 'Glo'},
'234816':{'en': 'MTN'},
'234817':{'en': '9mobile'},
'234818':{'en': '9mobile'},
'234819':{'en': 'Starcomms'},
'234901':{'en': 'Airtel'},
'234902':{'en': 'Airtel'},
'234903':{'en': 'MTN'},
'234904':{'en': 'Airtel'},
'234905':{'en': 'Glo'},
'234906':{'en': 'MTN'},
'234907':{'en': 'Airtel'},
'234908':{'en': '9mobile'},
'234909':{'en': '9mobile'},
'2356':{'en': 'Airtel'},
'2357':{'en': 'Sotel'},
'2359':{'en': 'Tigo'},
'23670':{'en': 'A-Cell'},
'23672':{'en': 'Orange'},
'23675':{'en': 'Telecel'},
'23677':{'en': 'Nationlink'},
'237650':{'en': 'MTN Cameroon'},
'237651':{'en': 'MTN Cameroon'},
'237652':{'en': 'MTN Cameroon'},
'237653':{'en': 'MTN Cameroon'},
'237654':{'en': 'MTN Cameroon'},
'237655':{'en': 'Orange'},
'237656':{'en': 'Orange'},
'237657':{'en': 'Orange'},
'237658':{'en': 'Orange'},
'237659':{'en': 'Orange'},
'23766':{'en': 'NEXTTEL'},
'23767':{'en': 'MTN Cameroon'},
'23768':{'en': 'NEXTTEL'},
'237680':{'en': 'MTN Cameroon'},
'237681':{'en': 'MTN Cameroon'},
'237682':{'en': 'MTN Cameroon'},
'237683':{'en': 'MTN Cameroon'},
'23769':{'en': 'Orange'},
'23833':{'en': 'T+'},
'23836':{'en': 'CVMOVEL'},
'23843':{'en': 'T+'},
'23846':{'en': 'CVMOVEL'},
'23851':{'en': 'T+'},
'23852':{'en': 'T+'},
'23853':{'en': 'T+'},
'23858':{'en': 'CVMOVEL'},
'23859':{'en': 'CVMOVEL'},
'23891':{'en': 'T+'},
'23892':{'en': 'T+'},
'23893':{'en': 'T+'},
'23895':{'en': 'CVMOVEL'},
'23897':{'en': 'CVMOVEL'},
'23898':{'en': 'CVMOVEL'},
'23899':{'en': 'CVMOVEL'},
'23990':{'en': 'Unitel'},
'23998':{'en': 'CSTmovel'},
'23999':{'en': 'CSTmovel'},
'2402':{'en': 'GETESA'},
'240550':{'en': 'Muni'},
'240551':{'en': 'HiTS'},
'24104':{'en': 'Airtel'},
'24105':{'en': 'Moov'},
'24106':{'en': 'Libertis'},
'24107':{'en': 'Airtel'},
'24120':{'en': 'Libertis'},
'24121':{'en': 'Libertis'},
'24122':{'en': 'Libertis'},
'24123':{'en': 'Libertis'},
'24124':{'en': 'Libertis'},
'24125':{'en': 'Libertis'},
'24126':{'en': 'Libertis'},
'24127':{'en': 'Libertis'},
'2413':{'en': 'Libertis'},
'2414':{'en': 'Airtel'},
'2415':{'en': 'Moov'},
'2416':{'en': 'Libertis'},
'24165':{'en': 'Moov'},
'2417':{'en': 'Airtel'},
'24201':{'en': 'Equateur Telecom'},
'24204':{'en': 'Warid'},
'24205':{'en': 'Airtel'},
'24206':{'en': 'MTN'},
'24380':{'en': 'Supercell'},
'24381':{'en': 'Vodacom'},
'24382':{'en': 'Vodacom'},
'24384':{'en': 'CCT'},
'24388':{'en': 'Yozma Timeturns sprl -YTT'},
'24389':{'en': 'Sait-Telecom (Oasis)'},
'24390':{'en': 'Africell'},
'24391':{'en': 'Africell'},
'24397':{'en': 'Zain'},
'24398':{'en': 'Zain'},
'24399':{'en': 'Zain'},
'24491':{'en': 'Movicel'},
'24492':{'en': 'UNITEL'},
'24493':{'en': 'UNITEL'},
'24494':{'en': 'UNITEL'},
'24499':{'en': 'Movicel'},
'24595':{'en': 'Orange'},
'24596':{'en': 'Spacetel'},
'24597':{'en': 'Guinetel'},
'24638':{'en': 'Sure Ltd'},
'24741':{'en': 'Sure South Atlantic'},
'24742':{'en': 'Sure South Atlantic'},
'24743':{'en': 'Sure South Atlantic'},
'24745':{'en': 'Sure South Atlantic'},
'24746':{'en': 'Sure South Atlantic'},
'24747':{'en': 'Sure South Atlantic'},
'24748':{'en': 'Sure South Atlantic'},
'24825':{'en': 'CWS'},
'24826':{'en': 'CWS'},
'24827':{'en': 'Airtel'},
'24828':{'en': 'Airtel'},
'24910':{'en': 'Sudatel'},
'24911':{'en': 'Sudatel'},
'24912':{'en': 'Sudatel'},
'24990':{'en': 'Zain'},
'24991':{'en': 'Zain'},
'24992':{'en': 'MTN'},
'24993':{'en': 'MTN'},
'24995':{'en': 'Network of The World Ltd'},
'24996':{'en': 'Zain'},
'24999':{'en': 'MTN'},
'25072':{'en': 'TIGO'},
'25073':{'en': 'Airtel'},
'25078':{'en': 'MTN'},
'2519':{'en': 'Ethio Telecom'},
'25224':{'en': 'Telesom'},
'25228':{'en': 'Nationlink'},
'25235':{'en': 'AirSom'},
'25239':{'en': 'AirSom'},
'25248':{'en': 'AirSom'},
'25249':{'en': 'AirSom'},
'25262':{'en': 'Somtel'},
'25263':{'en': 'Telesom'},
'25264':{'en': 'Somali Networks'},
'25265':{'en': 'Somtel'},
'25266':{'en': 'Somtel'},
'25267':{'en': 'Nationlink'},
'25268':{'en': 'Nationlink'},
'25269':{'en': 'Nationlink'},
'25279':{'en': 'Somtel'},
'25280':{'en': 'Somali Networks'},
'25288':{'en': 'Somali Networks'},
'2529':{'en': 'STG'},
'25290':{'en': 'Golis Telecom'},
'2537':{'en': 'Evatis'},
'25410':{'en': 'Airtel'},
'25411':{'en': 'Safaricom'},
'25470':{'en': 'Safaricom'},
'25471':{'en': 'Safaricom'},
'25472':{'en': 'Safaricom'},
'25473':{'en': 'Airtel'},
'25474':{'en': 'Safaricom'},
'254744':{'en': 'Homeland Media'},
'254747':{'en': 'JTL'},
'254749':{'en': 'WiAfrica'},
'25475':{'en': 'Airtel'},
'254757':{'en': 'Safaricom'},
'254758':{'en': 'Safaricom'},
'254759':{'en': 'Safaricom'},
'254760':{'en': 'Mobile Pay'},
'254761':{'en': 'Airtel'},
'254762':{'en': 'Airtel'},
'254763':{'en': 'Finserve'},
'254764':{'en': 'Finserve'},
'254765':{'en': 'Finserve'},
'254766':{'en': 'Finserve'},
'254767':{'en': 'Sema Mobile'},
'254768':{'en': 'Safaricom'},
'254769':{'en': 'Safaricom'},
'25477':{'en': 'Telkom'},
'25478':{'en': 'Airtel'},
'25479':{'en': 'Safaricom'},
'25562':{'en': 'Viettel'},
'25563':{'en': 'MTC'},
'25564':{'en': 'Cootel'},
'25565':{'en': 'tiGO'},
'25566':{'en': 'SMILE'},
'25567':{'en': 'tiGO'},
'25568':{'en': 'Airtel'},
'25569':{'en': 'Airtel'},
'25571':{'en': 'tiGO'},
'25573':{'en': 'Tanzania Telecom'},
'25574':{'en': 'Vodacom'},
'25575':{'en': 'Vodacom'},
'25576':{'en': 'Vodacom'},
'25577':{'en': 'Zantel'},
'25578':{'en': 'Airtel'},
'25579':{'en': 'Benson Informatics'},
'25670':{'en': 'Airtel'},
'25671':{'en': 'UTL'},
'256720':{'en': 'Smile'},
'256726':{'en': 'Tangerine'},
'25673':{'en': 'Hamilton Telecom'},
'25674':{'en': 'Sure Telecom'},
'25675':{'en': 'Airtel'},
'25677':{'en': 'MTN'},
'25678':{'en': 'MTN'},
'25679':{'en': 'Africell'},
'25729':{'en': 'Leo'},
'2573':{'en': 'Viettel'},
'2576':{'en': 'Viettel'},
'25771':{'en': 'Leo'},
'25772':{'en': 'Leo'},
'25775':{'en': 'Smart Mobile'},
'25776':{'en': 'Leo'},
'25777':{'en': 'Onatel'},
'25778':{'en': 'Smart Mobile'},
'25779':{'en': 'Leo'},
'25882':{'en': 'mcel'},
'25883':{'en': 'mcel'},
'25884':{'en': 'Vodacom'},
'25885':{'en': 'Vodacom'},
'25886':{'en': 'Movitel'},
'25887':{'en': 'Movitel'},
'25889':{'en': 'GMPCS'},
'26076':{'en': 'MTN'},
'26077':{'en': 'Airtel'},
'26095':{'en': 'ZAMTEL'},
'26096':{'en': 'MTN'},
'26097':{'en': 'Airtel'},
'26132':{'en': 'Orange'},
'26133':{'en': 'Airtel'},
'26134':{'en': 'Telma'},
'26139':{'en': 'Blueline'},
'26263900':{'en': 'Orange'},
'26263901':{'en': 'Orange'},
'26263902':{'en': 'Orange'},
'26263903':{'en': 'Only'},
'26263904':{'en': 'Only'},
'26263905':{'en': 'Only'},
'26263906':{'en': 'Only'},
'26263907':{'en': 'Only'},
'26263909':{'en': 'SFR'},
'26263910':{'en': 'SFR'},
'26263911':{'en': 'SFR'},
'26263919':{'en': 'Only'},
'2626392':{'en': 'SFR'},
'26263926':{'en': 'Only'},
'26263930':{'en': 'BJT'},
'26263939':{'en': 'Only'},
'2626394':{'en': 'SFR'},
'2626395':{'en': 'BJT'},
'26263960':{'en': 'Orange'},
'26263961':{'en': 'Orange'},
'26263962':{'en': 'Orange'},
'26263963':{'en': 'Orange'},
'26263964':{'en': 'Orange'},
'26263965':{'en': 'SFR'},
'26263966':{'en': 'SFR'},
'26263967':{'en': 'SFR'},
'26263968':{'en': 'SFR'},
'26263969':{'en': 'SFR'},
'26263970':{'en': 'BJT'},
'26263971':{'en': 'Only'},
'26263972':{'en': 'Only'},
'26263973':{'en': 'Only'},
'26263974':{'en': 'Only'},
'26263975':{'en': 'Only'},
'26263976':{'en': 'Orange'},
'26263977':{'en': 'Orange'},
'26263978':{'en': 'Orange'},
'26263979':{'en': 'Orange'},
'26263990':{'en': 'BJT'},
'26263994':{'en': 'Only'},
'26263995':{'en': 'Only'},
'26263996':{'en': 'Only'},
'26263997':{'en': 'Only'},
'26263999':{'en': 'Orange'},
'262692':{'en': 'SFR'},
'2626920':{'en': 'Orange'},
'2626922':{'en': 'Orange'},
'2626923':{'en': 'Orange'},
'26269240':{'en': 'Orange'},
'26269241':{'en': 'Orange'},
'26269242':{'en': 'Orange'},
'26269243':{'en': 'Orange'},
'26269244':{'en': 'Orange'},
'26269292':{'en': 'Only'},
'26269293':{'en': 'Only'},
'26269294':{'en': 'Only'},
'26269300':{'en': 'Orange'},
'26269301':{'en': 'SFR'},
'26269302':{'en': 'SFR'},
'26269303':{'en': 'SFR'},
'26269304':{'en': 'SFR'},
'26269306':{'en': 'Orange'},
'26269310':{'en': 'SFR'},
'26269311':{'en': 'Orange'},
'26269313':{'en': 'SFR'},
'26269320':{'en': 'SFR'},
'26269321':{'en': 'Orange'},
'26269322':{'en': 'Orange'},
'26269330':{'en': 'Only'},
'26269331':{'en': 'Only'},
'26269332':{'en': 'Only'},
'26269333':{'en': 'Orange'},
'26269339':{'en': 'Orange'},
'2626934':{'en': 'Only'},
'26269350':{'en': 'Only'},
'26269355':{'en': 'Orange'},
'26269360':{'en': 'Only'},
'26269361':{'en': 'ZEOP Mobile'},
'26269362':{'en': 'ZEOP Mobile'},
'26269366':{'en': 'Orange'},
'26269370':{'en': 'Only'},
'26269371':{'en': 'Only'},
'26269372':{'en': 'Only'},
'26269377':{'en': 'Orange'},
'26269380':{'en': 'Only'},
'26269381':{'en': 'Only'},
'26269382':{'en': 'Only'},
'26269383':{'en': 'Only'},
'26269388':{'en': 'Orange'},
'26269390':{'en': 'Orange'},
'26269391':{'en': 'Orange'},
'26269392':{'en': 'Orange'},
'26269393':{'en': 'Orange'},
'26269394':{'en': 'SFR'},
'26269397':{'en': 'SFR'},
'26269399':{'en': 'Orange'},
'2629':{'en': 'Orange'},
'26371':{'en': 'Net*One'},
'26373':{'en': 'Telecel'},
'26377':{'en': 'Econet'},
'26378':{'en': 'Econet'},
'26460':{'en': 'Telecom Namibia'},
'26481':{'en': 'MTC'},
'26482':{'en': 'Telecom Namibia'},
'26484':{'en': 'MTN'},
'26485':{'en': 'TN Mobile'},
'26511':{'en': 'Malawi Telecommunications Ltd (MTL)'},
'2653':{'en': 'TNM'},
'2657':{'en': 'Globally Advanced Integrated Networks Ltd'},
'2658':{'en': 'TNM'},
'2659':{'en': 'Airtel'},
'2665':{'en': 'Vodacom Lesotho (Pty) Ltd'},
'2666':{'en': 'Econet Ezi-Cel Lesotho'},
'26771':{'en': 'Mascom'},
'26772':{'en': 'Orange'},
'26773':{'en': 'BTC Mobile'},
'26774':{'en': 'Mascom'},
'267743':{'en': 'Orange'},
'267744':{'en': 'Orange'},
'267748':{'en': 'Orange'},
'267749':{'en': 'BTC Mobile'},
'267750':{'en': 'Orange'},
'267751':{'en': 'Orange'},
'267752':{'en': 'Orange'},
'267753':{'en': 'Orange'},
'267754':{'en': 'Mascom'},
'267755':{'en': 'Mascom'},
'267756':{'en': 'Mascom'},
'267757':{'en': 'Orange'},
'267758':{'en': 'BTC Mobile'},
'267759':{'en': 'Mascom'},
'267760':{'en': 'Mascom'},
'267761':{'en': 'Mascom'},
'267762':{'en': 'Mascom'},
'267763':{'en': 'Orange'},
'267764':{'en': 'Orange'},
'267765':{'en': 'Orange'},
'267766':{'en': 'Mascom'},
'267767':{'en': 'Mascom'},
'267768':{'en': 'BTC Mobile'},
'267769':{'en': 'Orange'},
'267770':{'en': 'Mascom'},
'267771':{'en': 'Mascom'},
'267772':{'en': 'BTC Mobile'},
'267773':{'en': 'Orange'},
'267774':{'en': 'Orange'},
'267775':{'en': 'Orange'},
'267776':{'en': 'Mascom'},
'267777':{'en': 'Mascom'},
'267778':{'en': 'Mascom'},
'267779':{'en': 'Orange'},
'26876':{'en': 'Swazi MTN'},
'26877':{'en': 'SPTC'},
'26878':{'en': 'Swazi MTN'},
'26879':{'en': 'Swazi Mobile Ltd'},
'2693':{'en': 'Comores Telecom'},
'2694':{'en': 'TELCO'},
'2710492':{'en': 'Vodacom'},
'2710493':{'en': 'Vodacom'},
'2710494':{'en': 'Vodacom'},
'2712492':{'en': 'Vodacom'},
'27134920':{'en': 'Vodacom'},
'27134921':{'en': 'Vodacom'},
'27134922':{'en': 'Vodacom'},
'27134925':{'en': 'Vodacom'},
'27144950':{'en': 'Vodacom'},
'27144952':{'en': 'Vodacom'},
'27144953':{'en': 'Vodacom'},
'27144955':{'en': 'Vodacom'},
'27154920':{'en': 'Vodacom'},
'27154950':{'en': 'Vodacom'},
'27154951':{'en': 'Vodacom'},
'27164920':{'en': 'Vodacom'},
'27174920':{'en': 'Vodacom'},
'27184920':{'en': 'Vodacom'},
'2719':{'en': 'Telkom Mobile'},
'2721492':{'en': 'Vodacom'},
'27224950':{'en': 'Vodacom'},
'27274950':{'en': 'Vodacom'},
'27284920':{'en': 'Vodacom'},
'2731492':{'en': 'Vodacom'},
'27324920':{'en': 'Vodacom'},
'27334920':{'en': 'Vodacom'},
'27344920':{'en': 'Vodacom'},
'27354920':{'en': 'Vodacom'},
'27364920':{'en': 'Vodacom'},
'27394920':{'en': 'Vodacom'},
'27404920':{'en': 'Vodacom'},
'2741492':{'en': 'Vodacom'},
'27424920':{'en': 'Vodacom'},
'27434920':{'en': 'Vodacom'},
'27434921':{'en': 'Vodacom'},
'27444920':{'en': 'Vodacom'},
'27444921':{'en': 'Vodacom'},
'27454920':{'en': 'Vodacom'},
'27464920':{'en': 'Vodacom'},
'27474950':{'en': 'Vodacom'},
'27484920':{'en': 'Vodacom'},
'27494920':{'en': 'Vodacom'},
'2751492':{'en': 'Vodacom'},
'27544950':{'en': 'Vodacom'},
'27564920':{'en': 'Vodacom'},
'27574920':{'en': 'Vodacom'},
'27584920':{'en': 'Vodacom'},
'27603':{'en': 'MTN'},
'27604':{'en': 'MTN'},
'27605':{'en': 'MTN'},
'27606':{'en': 'Vodacom'},
'27607':{'en': 'Vodacom'},
'27608':{'en': 'Vodacom'},
'27609':{'en': 'Vodacom'},
'2761':{'en': 'Cell C'},
'27614':{'en': 'Telkom Mobile'},
'2762':{'en': 'Cell C'},
'2763':{'en': 'MTN'},
'27636':{'en': 'Vodacom'},
'27637':{'en': 'Vodacom'},
'27640':{'en': 'MTN'},
'27641':{'en': 'Cell C'},
'27642':{'en': 'Cell C'},
'27643':{'en': 'Cell C'},
'27644':{'en': 'Cell C'},
'27645':{'en': 'Cell C'},
'27646':{'en': 'Vodacom'},
'27647':{'en': 'Vodacom'},
'27648':{'en': 'Vodacom'},
'27649':{'en': 'Vodacom'},
'27650':{'en': 'Cell C'},
'27651':{'en': 'Cell C'},
'27652':{'en': 'Cell C'},
'27653':{'en': 'Cell C'},
'27654':{'en': 'Cell C'},
'27655':{'en': 'MTN'},
'27656':{'en': 'MTN'},
'27657':{'en': 'MTN'},
'27658':{'en': 'Telkom Mobile'},
'27659':{'en': 'Telkom Mobile'},
'27660':{'en': 'Vodacom'},
'27661':{'en': 'Vodacom'},
'27662':{'en': 'Vodacom'},
'27663':{'en': 'Vodacom'},
'27664':{'en': 'Vodacom'},
'27665':{'en': 'Vodacom'},
'27670':{'en': 'Telkom Mobile'},
'27671':{'en': 'Telkom Mobile'},
'27672':{'en': 'Telkom Mobile'},
'27673':{'en': 'Vodacom'},
'27674':{'en': 'Vodacom'},
'27675':{'en': 'Vodacom'},
'27676':{'en': 'Telkom Mobile'},
'27677':{'en': 'Telkom Mobile'},
'2771':{'en': 'Vodacom'},
'27710':{'en': 'MTN'},
'27717':{'en': 'MTN'},
'27718':{'en': 'MTN'},
'27719':{'en': 'MTN'},
'2772':{'en': 'Vodacom'},
'2773':{'en': 'MTN'},
'2774':{'en': 'Cell C'},
'27741':{'en': 'Virgin Mobile'},
'2776':{'en': 'Vodacom'},
'2778':{'en': 'MTN'},
'2779':{'en': 'Vodacom'},
'27810':{'en': 'MTN'},
'27811':{'en': 'Telkom Mobile'},
'27812':{'en': 'Telkom Mobile'},
'27813':{'en': 'Telkom Mobile'},
'27814':{'en': 'Telkom Mobile'},
'27815':{'en': 'Telkom Mobile'},
'27816':{'en': 'WBS Mobile'},
'27817':{'en': 'Telkom Mobile'},
'27818':{'en': 'Vodacom'},
'278190':{'en': 'TelAfrica (Wireless Connect)'},
'278191':{'en': 'TelAfrica (Wireless Connect)'},
'278192':{'en': 'TelAfrica (Wireless Connect)'},
'2782':{'en': 'Vodacom'},
'2783':{'en': 'MTN'},
'2784':{'en': 'Cell C'},
'2787086':{'en': 'Vodacom'},
'2787087':{'en': 'Vodacom'},
'2787158':{'en': 'Vodacom'},
'2787285':{'en': 'Vodacom'},
'2787286':{'en': 'Vodacom'},
'2787287':{'en': 'Vodacom'},
'2787288':{'en': 'Vodacom'},
'2787289':{'en': 'Vodacom'},
'2787310':{'en': 'Vodacom'},
'29051':{'en': 'Sure South Atlantic Ltd'},
'29052':{'en': 'Sure South Atlantic Ltd'},
'29053':{'en': 'Sure South Atlantic Ltd'},
'29054':{'en': 'Sure South Atlantic Ltd'},
'29055':{'en': 'Sure South Atlantic Ltd'},
'29056':{'en': 'Sure South Atlantic Ltd'},
'29057':{'en': 'Sure South Atlantic Ltd'},
'29058':{'en': 'Sure South Atlantic Ltd'},
'29061':{'en': 'Sure South Atlantic Ltd'},
'29062':{'en': 'Sure South Atlantic Ltd'},
'29063':{'en': 'Sure South Atlantic Ltd'},
'29064':{'en': 'Sure South Atlantic Ltd'},
'29065':{'en': 'Sure South Atlantic Ltd'},
'29066':{'en': 'Sure South Atlantic Ltd'},
'29067':{'en': 'Sure South Atlantic Ltd'},
'29068':{'en': 'Sure South Atlantic Ltd'},
'29117':{'en': 'EriTel'},
'2917':{'en': 'EriTel'},
'29729':{'en': 'Digicel'},
'29756':{'en': 'SETAR'},
'29759':{'en': 'SETAR'},
'29760':{'en': 'SETAR'},
'29762':{'en': 'MIO Wireless'},
'29763':{'en': 'MIO Wireless'},
'29764':{'en': 'Digicel'},
'29766':{'en': 'SETAR'},
'297690':{'en': 'SETAR'},
'297699':{'en': 'SETAR'},
'29773':{'en': 'Digicel'},
'29774':{'en': 'Digicel'},
'29777':{'en': 'SETAR'},
'29821':{'en': 'Faroese Telecom'},
'29822':{'en': 'Faroese Telecom'},
'29823':{'en': 'Faroese Telecom'},
'29824':{'en': 'Faroese Telecom'},
'29825':{'en': 'Faroese Telecom'},
'29826':{'en': 'Faroese Telecom'},
'29827':{'en': 'Faroese Telecom'},
'29828':{'en': 'Faroese Telecom'},
'29829':{'en': 'Faroese Telecom'},
'2985':{'en': 'Vodafone'},
'2987':{'en': 'Vodafone'},
'29878':{'en': 'Faroese Telecom'},
'29879':{'en': 'Faroese Telecom'},
'2992':{'en': 'TELE Greenland A/S'},
'2994':{'en': 'TELE Greenland A/S'},
'2995':{'en': 'TELE Greenland A/S'},
'30685185':{'en': 'Cyta'},
'3068519':{'en': 'Cyta'},
'30685500':{'en': 'Cyta'},
'30685501':{'en': 'BWS'},
'30685505':{'en': 'Cyta'},
'30685550':{'en': 'Cyta'},
'30685555':{'en': 'Cyta'},
'30685585':{'en': 'Cyta'},
'30687500':{'en': 'BWS'},
'30688500':{'en': 'BWS'},
'30689900':{'en': 'OTEGlobe'},
'306900':{'en': 'BWS'},
'30690100':{'en': 'MI Carrier Services'},
'30690199':{'en': 'BWS'},
'30690200':{'en': 'MI Carrier Services'},
'30690299':{'en': 'BWS'},
'30690300':{'en': 'MI Carrier Services'},
'30690399':{'en': 'BWS'},
'30690400':{'en': 'MI Carrier Services'},
'30690499':{'en': 'BWS'},
'30690500':{'en': 'MI Carrier Services'},
'30690555':{'en': 'AMD Telecom'},
'30690574':{'en': 'BWS'},
'30690575':{'en': 'BWS'},
'30690588':{'en': 'BWS'},
'30690599':{'en': 'BWS'},
'306906':{'en': 'Wind'},
'306907':{'en': 'Wind'},
'306908':{'en': 'Wind'},
'306909':{'en': 'Wind'},
'30691000':{'en': 'BWS'},
'30691234':{'en': 'M-STAT'},
'30691345':{'en': 'Forthnet'},
'30691400':{'en': 'AMD Telecom'},
'30691600':{'en': 'Compatel'},
'30691700':{'en': 'Inter Telecom'},
'30691888':{'en': 'OSE'},
'30692354':{'en': 'Premium Net International'},
'30692356':{'en': 'SIA NETBALT'},
'30692428':{'en': 'Premium Net International'},
'30693':{'en': 'Wind'},
'30694':{'en': 'Vodafone'},
'306950':{'en': 'Vodafone'},
'306951':{'en': 'Vodafone'},
'30695200':{'en': 'Compatel'},
'3069522':{'en': 'Vodafone'},
'3069523':{'en': 'Vodafone'},
'3069524':{'en': 'BWS'},
'3069529':{'en': 'BWS'},
'3069530':{'en': 'Cyta'},
'30695310':{'en': 'MI Carrier Services'},
'30695328':{'en': 'Premium Net International'},
'30695330':{'en': 'Apifon'},
'30695340':{'en': 'AMD Telecom'},
'30695355':{'en': 'Cyta'},
'30695400':{'en': 'AMD Telecom'},
'30695410':{'en': 'MI Carrier Services'},
'30695456':{'en': 'BWS'},
'30695490':{'en': 'MI Carrier Services'},
'30695499':{'en': 'M-STAT'},
'306955':{'en': 'Vodafone'},
'306956':{'en': 'Vodafone'},
'306957':{'en': 'Vodafone'},
'306958':{'en': 'Vodafone'},
'306959':{'en': 'Vodafone'},
'3069601':{'en': 'OTE'},
'30697':{'en': 'Cosmote'},
'30698':{'en': 'Cosmote'},
'3069900':{'en': 'Wind'},
'30699010':{'en': 'BWS'},
'30699022':{'en': 'Yuboto'},
'30699046':{'en': 'Premium Net International'},
'30699048':{'en': 'AMD Telecom'},
'30699099':{'en': 'BWS'},
'306991':{'en': 'Wind'},
'306992':{'en': 'Wind'},
'306993':{'en': 'Wind'},
'306994':{'en': 'Wind'},
'306995':{'en': 'Wind'},
'306996':{'en': 'Wind'},
'306997':{'en': 'Wind'},
'306998':{'en': 'Wind'},
'306999':{'en': 'Wind'},
'3094':{'en': 'Vodafone'},
'31610':{'en': 'KPN'},
'31611':{'en': 'Vodafone Libertel B.V.'},
'31612':{'en': 'KPN'},
'31613':{'en': 'KPN'},
'31614':{'en': 'T-Mobile'},
'31615':{'en': 'Vodafone Libertel B.V.'},
'31616':{'en': 'Telfort'},
'31617':{'en': 'Telfort'},
'31618':{'en': 'T-Mobile Thuis'},
'31619':{'en': 'KPN'},
'31620':{'en': 'KPN'},
'31621':{'en': 'Vodafone Libertel B.V.'},
'31622':{'en': 'KPN'},
'31623':{'en': 'KPN'},
'31624':{'en': 'T-Mobile'},
'31625':{'en': 'Vodafone Libertel B.V.'},
'31626':{'en': 'Telfort'},
'31627':{'en': 'Vodafone Libertel B.V.'},
'31628':{'en': 'T-Mobile Thuis'},
'31629':{'en': 'Vodafone Libertel B.V.'},
'31630':{'en': 'KPN'},
'31631':{'en': 'Vodafone Libertel B.V.'},
'31633':{'en': 'Telfort'},
'31634':{'en': 'T-Mobile'},
'316351':{'en': 'Glotell B.V (V-Tell NL)'},
'316352':{'en': 'Lancelot'},
'316353':{'en': 'KPN'},
'316356':{'en': 'ASPIDER Solutions Nederland B.V.'},
'316357':{'en': 'ASPIDER Solutions Nederland B.V.'},
'316358':{'en': 'ASPIDER Solutions Nederland B.V.'},
'316359':{'en': 'ASPIDER Solutions Nederland B.V.'},
'31636':{'en': 'Tele2'},
'31637':{'en': 'Teleena (MVNE)'},
'31638':{'en': 'T-Mobile Thuis'},
'31639':{'en': 'T-Mobile Thuis'},
'31640':{'en': 'Tele2'},
'31641':{'en': 'T-Mobile'},
'31642':{'en': 'T-Mobile'},
'31643':{'en': 'T-Mobile'},
'31644':{'en': 'Telfort'},
'31645':{'en': 'Telfort'},
'31646':{'en': 'Vodafone Libertel B.V.'},
'31647':{'en': 'Telfort'},
'31648':{'en': 'T-Mobile Thuis'},
'31649':{'en': 'Telfort'},
'31650':{'en': 'Vodafone Libertel B.V.'},
'31651':{'en': 'KPN'},
'31652':{'en': 'Vodafone Libertel B.V.'},
'31653':{'en': 'KPN'},
'31654':{'en': 'Vodafone Libertel B.V.'},
'31655':{'en': 'Vodafone Libertel B.V.'},
'31656':{'en': 'T-Mobile'},
'31657':{'en': 'KPN'},
'31658':{'en': 'Telfort'},
'316580':{'en': 'Private Mobility Nederland'},
'31659':{'en': 'Vectone Mobile/Delight Mobile'},
'316599':{'en': 'Motto'},
'31680':{'en': 'Vodafone Libertel B.V.'},
'31681':{'en': 'T-Mobile'},
'31682':{'en': 'KPN'},
'31683':{'en': 'KPN'},
'31684':{'en': 'Lycamobile'},
'31685':{'en': 'Lycamobile'},
'31686':{'en': 'Lycamobile'},
'31687':{'en': 'Lycamobile'},
'3245001':{'en': 'Gateway Communications'},
'32455':{'en': 'VOO'},
'32456':{'en': 'Mobile Vikings/JIM Mobile'},
'32460':{'en': 'Proximus'},
'324618':{'en': 'N.M.B.S.'},
'324630':{'en': 'TISMI BV'},
'324651':{'en': 'Lycamobile'},
'324652':{'en': 'Lycamobile'},
'324653':{'en': 'Lycamobile'},
'324654':{'en': 'Lycamobile'},
'324655':{'en': 'Lycamobile'},
'324656':{'en': 'Lycamobile'},
'324657':{'en': 'Lycamobile'},
'324658':{'en': 'Lycamobile'},
'324659':{'en': 'Lycamobile'},
'324660':{'en': 'Lycamobile'},
'324661':{'en': 'Lycamobile'},
'324662':{'en': 'Lycamobile'},
'324663':{'en': 'Lycamobile'},
'324664':{'en': 'Lycamobile'},
'324665':{'en': 'Vectone'},
'324666':{'en': 'Vectone'},
'324667':{'en': 'Vectone'},
'324669':{'en': 'Voxbone SA'},
'324670':{'en': 'Telenet'},
'324671':{'en': 'Join Experience Belgium'},
'324672':{'en': 'Join Experience Belgium'},
'32467306':{'en': 'Telenet'},
'324674':{'en': 'Febo Telecom'},
'324676':{'en': 'Lycamobile'},
'324677':{'en': 'Lycamobile'},
'324678':{'en': 'Lycamobile'},
'324679':{'en': 'Interactive Digital Media GmbH'},
'32468':{'en': 'Telenet'},
'324686':{'en': u('OnOff T\u00e9l\u00e9com SASU')},
'324687':{'en': 'Premium Routing GmbH'},
'324688':{'en': 'Premium Routing GmbH'},
'324689':{'en': 'Febo Telecom'},
'3247':{'en': 'Proximus'},
'324805':{'en': 'Voyacom SPRL'},
'324807':{'en': 'MessageBird BV'},
'324809':{'en': 'Ericsson NV'},
'32483':{'en': 'Telenet'},
'32484':{'en': 'Telenet'},
'32485':{'en': 'Telenet'},
'32486':{'en': 'Telenet'},
'32487':{'en': 'Telenet'},
'32488':{'en': 'Telenet'},
'32489':{'en': 'Telenet'},
'3249':{'en': 'Orange'},
'336000':{'en': 'Free Mobile'},
'336001':{'en': 'Orange France'},
'336002':{'en': 'SFR'},
'336003':{'en': 'Bouygues'},
'3360040':{'en': 'Zeop'},
'3360041':{'en': 'Orange France'},
'3360042':{'en': 'Digicel Antilles Francaises Guyane'},
'3360043':{'en': 'Dauphin Telecom'},
'3360044':{'en': 'OUTREMER TELECOM'},
'3360045':{'en': 'UTS CARAIBES'},
'3360051':{'en': 'Orange France'},
'3360052':{'en': 'SFR'},
'3360053':{'en': 'BJT'},
'3360054':{'en': 'Only (Telco OI)'},
'3360055':{'en': 'Only (Telco OI)'},
'336006':{'en': 'Free Mobile'},
'336007':{'en': 'SFR'},
'336008':{'en': 'Orange France'},
'336009':{'en': 'Bouygues'},
'33601':{'en': 'SFR'},
'33602':{'en': 'SFR'},
'33603':{'en': 'SFR'},
'336040':{'en': 'Afone'},
'336041':{'en': 'Afone'},
'336042':{'en': 'e*Message'},
'336043':{'en': 'e*Message'},
'336044':{'en': 'Afone'},
'336045':{'en': 'SFR'},
'336046':{'en': 'SFR'},
'336047':{'en': 'SFR'},
'336048':{'en': 'SFR'},
'336049':{'en': 'SFR'},
'336050':{'en': 'Euroinformation Telecom'},
'336051':{'en': 'Euroinformation Telecom'},
'336052':{'en': 'Euroinformation Telecom'},
'336053':{'en': 'Euroinformation Telecom'},
'336054':{'en': 'Euroinformation Telecom'},
'336055':{'en': 'Lycamobile'},
'336056':{'en': 'Lycamobile'},
'336057':{'en': 'Lycamobile'},
'336058':{'en': 'Lycamobile'},
'336059':{'en': 'Lycamobile'},
'336060':{'en': 'e*Message'},
'336061':{'en': 'e*Message'},
'336062':{'en': 'e*Message'},
'336063':{'en': 'e*Message'},
'336064':{'en': 'Afone'},
'336065':{'en': 'Euroinformation Telecom'},
'336066':{'en': 'Euroinformation Telecom'},
'336067':{'en': 'Euroinformation Telecom'},
'336068':{'en': 'Euroinformation Telecom'},
'336069':{'en': 'Euroinformation Telecom'},
'33607':{'en': 'Orange France'},
'33608':{'en': 'Orange France'},
'33609':{'en': 'SFR'},
'3361':{'en': 'SFR'},
'3362':{'en': 'SFR'},
'33630':{'en': 'Orange France'},
'33631':{'en': 'Orange France'},
'33632':{'en': 'Orange France'},
'33633':{'en': 'Orange France'},
'33634':{'en': 'SFR'},
'33635':{'en': 'SFR'},
'33636':{'en': 'Euroinformation Telecom'},
'33637':{'en': 'Orange France'},
'33638':{'en': 'Orange France'},
'3363800':{'en': 'Globalstar Europe'},
'3363801':{'en': 'Prixtel'},
'3363802':{'en': 'Prixtel'},
'3363803':{'en': 'Prixtel'},
'3363804':{'en': 'Prixtel'},
'3363805':{'en': 'Prixtel'},
'3363806':{'en': 'IP Directions'},
'3363807':{'en': 'Alphalink'},
'3363808':{'en': 'Alphalink'},
'3363809':{'en': 'Alphalink'},
'33640':{'en': 'Orange France'},
'3364000':{'en': 'Globalstar Europe'},
'3364001':{'en': 'Globalstar Europe'},
'3364002':{'en': 'Globalstar Europe'},
'3364003':{'en': 'Globalstar Europe'},
'3364004':{'en': 'Globalstar Europe'},
'3364005':{'en': 'Coriolis Telecom'},
'3364006':{'en': 'Coriolis Telecom'},
'3364007':{'en': 'Coriolis Telecom'},
'3364008':{'en': 'Coriolis Telecom'},
'3364009':{'en': 'Coriolis Telecom'},
'336410':{'en': 'La poste telecom'},
'336411':{'en': 'La poste telecom'},
'336412':{'en': 'La poste telecom'},
'336413':{'en': 'La poste telecom'},
'336414':{'en': 'La poste telecom'},
'336415':{'en': 'La poste telecom'},
'3364160':{'en': 'Euroinformation Telecom'},
'3364161':{'en': 'Euroinformation Telecom'},
'3364162':{'en': 'Mobiquithings'},
'3364163':{'en': 'SCT'},
'3364164':{'en': 'Legos'},
'3364165':{'en': 'e*Message'},
'3364166':{'en': 'SFR'},
'3364167':{'en': 'SFR'},
'3364168':{'en': 'SFR'},
'3364169':{'en': 'SFR'},
'33642':{'en': 'Orange France'},
'33643':{'en': 'Orange France'},
'336440':{'en': 'La poste telecom'},
'336441':{'en': 'Orange France'},
'336442':{'en': 'Orange France'},
'336443':{'en': 'Orange France'},
'336444':{'en': 'Transatel'},
'336445':{'en': 'Transatel'},
'336446':{'en': 'Transatel'},
'336447':{'en': 'La poste telecom'},
'336448':{'en': 'La poste telecom'},
'336449':{'en': 'La poste telecom'},
'33645':{'en': 'Orange France'},
'33646':{'en': 'SFR'},
'33647':{'en': 'Orange France'},
'33648':{'en': 'Orange France'},
'33649':{'en': 'Orange France'},
'3364950':{'en': 'Keyyo'},
'3364990':{'en': 'Intercall'},
'3364991':{'en': 'Intercall'},
'3364994':{'en': 'e*Message'},
'3364995':{'en': 'Prixtel'},
'3364996':{'en': 'e*Message'},
'3364997':{'en': 'e*Message'},
'3364998':{'en': 'Prixtel'},
'3364999':{'en': 'SFR'},
'33650':{'en': 'Bouygues'},
'33651':{'en': 'Free Mobile'},
'33652':{'en': 'Free Mobile'},
'336530':{'en': 'Bouygues'},
'336531':{'en': 'Bouygues'},
'336532':{'en': 'Bouygues'},
'336533':{'en': 'Bouygues'},
'336534':{'en': 'Bouygues'},
'336535':{'en': 'Free Mobile'},
'336536':{'en': 'Free Mobile'},
'336537':{'en': 'Free Mobile'},
'336538':{'en': 'Free Mobile'},
'336539':{'en': 'Free Mobile'},
'33654':{'en': 'Orange France'},
'33655':{'en': 'SFR'},
'33656':{'en': 'e*Message'},
'3365660':{'en': 'Mobiquithings'},
'3365661':{'en': 'Airbus Defence and Space'},
'3365662':{'en': 'Mobiquithings'},
'3365663':{'en': 'Mobiquithings'},
'3365664':{'en': 'Mobiquithings'},
'3365665':{'en': 'Mobiquithings'},
'3365666':{'en': 'Prixtel'},
'3365667':{'en': 'Prixtel'},
'3365668':{'en': 'Prixtel'},
'3365669':{'en': 'Prixtel'},
'336567':{'en': 'La poste telecom'},
'336568':{'en': 'La poste telecom'},
'33657':{'en': 'e*Message'},
'33658':{'en': 'Bouygues'},
'33659':{'en': 'Bouygues'},
'3366':{'en': 'Bouygues'},
'3367':{'en': 'Orange France'},
'3368':{'en': 'Orange France'},
'33692':{'en': 'Bouygues'},
'33693':{'en': 'Bouygues'},
'33696':{'en': 'Bouygues'},
'33698':{'en': 'Bouygues'},
'33699':{'en': 'Bouygues'},
'33700000':{'en': 'Orange France'},
'33700001':{'en': 'SFR'},
'33700002':{'en': 'Mobiquithings'},
'33700003':{'en': 'Bouygues'},
'33700004':{'en': 'Afone'},
'33700005':{'en': 'Coriolis Telecom'},
'33700006':{'en': 'Mobiquithings'},
'337500':{'en': 'Euroinformation Telecom'},
'337501':{'en': 'SFR'},
'337502':{'en': 'SFR'},
'337503':{'en': 'SFR'},
'337504':{'en': 'SFR'},
'3375050':{'en': 'Euroinformation Telecom'},
'3375051':{'en': 'Euroinformation Telecom'},
'3375052':{'en': 'Euroinformation Telecom'},
'3375053':{'en': 'Euroinformation Telecom'},
'3375057':{'en': 'Euroinformation Telecom'},
'3375058':{'en': 'Euroinformation Telecom'},
'3375059':{'en': 'Sewan communications'},
'337506':{'en': 'Orange France'},
'3375060':{'en': 'Euroinformation Telecom'},
'3375070':{'en': 'Euroinformation Telecom'},
'3375071':{'en': 'Netcom Group'},
'3375072':{'en': 'Netcom Group'},
'3375073':{'en': 'Alphalink'},
'3375074':{'en': 'Alphalink'},
'3375075':{'en': 'Alphalink'},
'3375076':{'en': 'Globalstar Europe'},
'3375077':{'en': 'Globalstar Europe'},
'3375078':{'en': 'China Telecom (France) Limited'},
'3375079':{'en': 'China Telecom (France) Limited'},
'337508':{'en': 'SFR'},
'337509':{'en': 'SFR'},
'33751':{'en': 'Lycamobile'},
'337516':{'en': 'SFR'},
'337517':{'en': 'Completel'},
'337518':{'en': 'Lebara France Limited'},
'337519':{'en': 'Lebara France Limited'},
'3375202':{'en': 'Prixtel'},
'3375203':{'en': 'Prixtel'},
'3375204':{'en': 'Prixtel'},
'3375205':{'en': 'Prixtel'},
'3375206':{'en': 'Prixtel'},
'3375207':{'en': 'Prixtel'},
'3375208':{'en': 'Prixtel'},
'3375209':{'en': 'Prixtel'},
'337521':{'en': 'Lebara France Limited'},
'337522':{'en': 'Lebara France Limited'},
'337523':{'en': 'Lebara France Limited'},
'337524':{'en': 'Lebara France Limited'},
'337525':{'en': 'Lebara France Limited'},
'337526':{'en': 'SFR'},
'337527':{'en': 'Lebara France Limited'},
'337528':{'en': 'Lebara France Limited'},
'337529':{'en': 'Lebara France Limited'},
'33753':{'en': 'Lycamobile'},
'337540':{'en': 'Lebara France Limited'},
'337541':{'en': 'Lebara France Limited'},
'337542':{'en': 'Lebara France Limited'},
'337543':{'en': 'Prixtel'},
'3375430':{'en': 'TDF'},
'3375431':{'en': 'Legos'},
'3375432':{'en': 'Euroinformation Telecom'},
'337544':{'en': 'Lebara France Limited'},
'337545':{'en': 'Lebara France Limited'},
'337546':{'en': 'Mobiquithings'},
'337547':{'en': 'ACN Communications'},
'337548':{'en': 'Completel'},
'337549':{'en': 'Completel'},
'33755':{'en': 'Lebara France Limited'},
'3375550':{'en': 'Legos'},
'3375551':{'en': 'Legos'},
'3375552':{'en': 'Legos'},
'3375553':{'en': 'Legos'},
'3375554':{'en': 'Legos'},
'3375555':{'en': 'Euroinformation Telecom'},
'3375556':{'en': 'Intercall'},
'3375557':{'en': 'Intercall'},
'3375558':{'en': 'Sewan communications'},
'3375559':{'en': 'Sewan communications'},
'3375560':{'en': 'Prixtel'},
'3375561':{'en': 'Prixtel'},
'3375562':{'en': 'Prixtel'},
'3375563':{'en': 'Prixtel'},
'3375564':{'en': 'Prixtel'},
'3375565':{'en': 'Sewan communications'},
'3375566':{'en': 'Euroinformation Telecom'},
'3375567':{'en': 'Euroinformation Telecom'},
'3375568':{'en': 'Euroinformation Telecom'},
'3375569':{'en': 'Axialys'},
'337560':{'en': 'Euroinformation Telecom'},
'337561':{'en': 'Euroinformation Telecom'},
'337562':{'en': 'Euroinformation Telecom'},
'3375630':{'en': 'Euroinformation Telecom'},
'3375631':{'en': 'Euroinformation Telecom'},
'3375632':{'en': 'Euroinformation Telecom'},
'3375633':{'en': 'Euroinformation Telecom'},
'3375634':{'en': 'Euroinformation Telecom'},
'337565':{'en': 'Transatel'},
'337566':{'en': 'Transatel'},
'337567':{'en': 'Transatel'},
'337568':{'en': 'Transatel'},
'337569':{'en': 'Transatel'},
'3375700':{'en': 'Sewan communications'},
'3375701':{'en': 'Mobiweb telecom limited'},
'3375702':{'en': 'Mobiweb telecom limited'},
'3375703':{'en': 'Mobiweb telecom limited'},
'3375704':{'en': 'Mobiweb telecom limited'},
'3375705':{'en': 'Mobiweb telecom limited'},
'3375706':{'en': 'Nordnet'},
'3375707':{'en': 'Keyyo'},
'3375717':{'en': 'Keyyo'},
'337572':{'en': 'Mobiquithings'},
'337573':{'en': 'Mobiquithings'},
'337574':{'en': 'Coriolis Telecom'},
'3375750':{'en': 'Coriolis Telecom'},
'3375751':{'en': 'Coriolis Telecom'},
'3375752':{'en': 'Coriolis Telecom'},
'3375753':{'en': 'Coriolis Telecom'},
'3375754':{'en': 'Coriolis Telecom'},
'3375755':{'en': 'Coriolis Telecom'},
'3375756':{'en': 'Coriolis Telecom'},
'3375757':{'en': 'Euroinformation Telecom'},
'3375758':{'en': 'Euroinformation Telecom'},
'3375763':{'en': 'Euroinformation Telecom'},
'3375767':{'en': 'Euroinformation Telecom'},
'3375777':{'en': 'Euroinformation Telecom'},
'3375779':{'en': 'Halys'},
'3375787':{'en': 'Euroinformation Telecom'},
'3375788':{'en': 'BJT'},
'3375789':{'en': 'BJT'},
'337579':{'en': 'Legos'},
'33758':{'en': 'Lycamobile'},
'33759':{'en': 'Vectone mobile'},
'3376':{'en': 'Bouygues'},
'33766':{'en': 'Free Mobile'},
'33767':{'en': 'Free Mobile'},
'33768':{'en': 'Free Mobile'},
'33769':{'en': 'Free Mobile'},
'337700':{'en': 'Orange France'},
'337701':{'en': 'Orange France'},
'337702':{'en': 'Orange France'},
'337703':{'en': 'SFR'},
'337704':{'en': 'SFR'},
'337705':{'en': 'Euroinformation Telecom'},
'337706':{'en': 'Euroinformation Telecom'},
'337707':{'en': 'Euroinformation Telecom'},
'337708':{'en': 'Euroinformation Telecom'},
'337709':{'en': 'Euroinformation Telecom'},
'337710':{'en': 'Euroinformation Telecom'},
'337711':{'en': 'Euroinformation Telecom'},
'337712':{'en': 'Euroinformation Telecom'},
'337713':{'en': 'SFR'},
'337714':{'en': 'SFR'},
'3377150':{'en': 'SFR'},
'3377151':{'en': 'SFR'},
'3377152':{'en': 'SFR'},
'3377153':{'en': 'SFR'},
'3377154':{'en': 'SFR'},
'3377155':{'en': 'Euroinformation Telecom'},
'3377156':{'en': 'Euroinformation Telecom'},
'3377157':{'en': 'Euroinformation Telecom'},
'3377158':{'en': 'Euroinformation Telecom'},
'3377159':{'en': 'Euroinformation Telecom'},
'337716':{'en': 'Euroinformation Telecom'},
'337717':{'en': 'Euroinformation Telecom'},
'337718':{'en': 'Euroinformation Telecom'},
'3377190':{'en': 'Euroinformation Telecom'},
'3377191':{'en': 'Euroinformation Telecom'},
'3377192':{'en': 'Euroinformation Telecom'},
'3377193':{'en': 'Euroinformation Telecom'},
'3377194':{'en': 'Euroinformation Telecom'},
'33772':{'en': 'Orange France'},
'33773':{'en': 'Syma mobile'},
'33774':{'en': 'Syma mobile'},
'337750':{'en': 'SFR'},
'337751':{'en': 'SFR'},
'337752':{'en': 'SFR'},
'337753':{'en': 'SFR'},
'337754':{'en': 'SFR'},
'337755':{'en': 'Mobiquithings'},
'337756':{'en': 'Mobiquithings'},
'337757':{'en': 'Free Mobile'},
'33776':{'en': 'SFR'},
'33777':{'en': 'SFR'},
'33778':{'en': 'SFR'},
'33779':{'en': 'SFR'},
'3378':{'en': 'Orange France'},
'33780':{'en': 'Afone'},
'337807':{'en': 'Lebara France Limited'},
'337808':{'en': 'Lebara France Limited'},
'337809':{'en': 'Onoff telecom'},
'33781':{'en': 'Free Mobile'},
'33782':{'en': 'Free Mobile'},
'33783':{'en': 'Free Mobile'},
'337846':{'en': 'La poste telecom'},
'337847':{'en': 'La poste telecom'},
'337848':{'en': 'La poste telecom'},
'337849':{'en': 'Euroinformation Telecom'},
'34600':{'en': 'Vodafone'},
'34601':{'en': 'Vodafone'},
'346016':{'en': 'Orange'},
'346018':{'en': 'Orange'},
'346019':{'en': 'Orange'},
'346020':{'en': 'Lycamobile'},
'346021':{'en': 'Lycamobile'},
'3460220':{'en': 'Orange'},
'3460221':{'en': 'Ion mobile'},
'3460222':{'en': 'Vozelia'},
'3460223':{'en': 'Orange'},
'3460224':{'en': 'Oceans'},
'3460225':{'en': 'VozTelecom'},
'3460226':{'en': 'Orange'},
'3460227':{'en': 'Orange'},
'3460228':{'en': 'Orange'},
'3460229':{'en': 'Boutique'},
'346023':{'en': 'Lycamobile'},
'346024':{'en': 'Lebara'},
'346025':{'en': 'Lebara'},
'346026':{'en': 'Lebara'},
'346027':{'en': 'Lebara'},
'346028':{'en': 'Lycamobile'},
'346029':{'en': 'DIA'},
'3460300':{'en': 'Vodafone'},
'3460301':{'en': 'Vodafone'},
'3460302':{'en': 'Vodafone'},
'3460303':{'en': 'Vodafone'},
'3460304':{'en': 'Vodafone'},
'3460305':{'en': 'Lebara'},
'3460306':{'en': 'Lebara'},
'3460307':{'en': 'Lebara'},
'3460308':{'en': 'Lebara'},
'3460309':{'en': 'Lebara'},
'346031':{'en': 'Lebara'},
'346032':{'en': 'Lebara'},
'346033':{'en': 'Lebara'},
'346034':{'en': 'Vodafone'},
'346035':{'en': 'Vodafone'},
'346036':{'en': 'Vodafone'},
'346037':{'en': 'Vodafone'},
'346038':{'en': 'Vodafone'},
'346039':{'en': 'Lebara'},
'34604':{'en': 'Lebara'},
'346040':{'en': 'Orange'},
'346045':{'en': 'Orange'},
'34605':{'en': 'Orange'},
'3460529':{'en': 'MasMovil'},
'34606':{'en': 'Movistar'},
'34607':{'en': 'Vodafone'},
'34608':{'en': 'Movistar'},
'34609':{'en': 'Movistar'},
'34610':{'en': 'Vodafone'},
'34611':{'en': 'Republica Movil'},
'346110':{'en': 'Orange'},
'346112':{'en': 'Lebara'},
'346113':{'en': 'Lebara'},
'34612':{'en': 'Syma'},
'346122':{'en': 'Lycamobile'},
'346124':{'en': 'Lycamobile'},
'346125':{'en': 'Lycamobile'},
'34615':{'en': 'Orange'},
'34616':{'en': 'Movistar'},
'34617':{'en': 'Vodafone'},
'34618':{'en': 'Movistar'},
'34619':{'en': 'Movistar'},
'34620':{'en': 'Movistar'},
'346210':{'en': 'Republica Movil'},
'346211':{'en': 'Republica Movil'},
'346212':{'en': 'Movistar'},
'346213':{'en': 'Republica Movil'},
'346214':{'en': 'Republica Movil'},
'346215':{'en': 'Republica Movil'},
'346216':{'en': 'Republica Movil'},
'34622':{'en': 'Yoigo'},
'346230':{'en': 'Yoigo'},
'346231':{'en': 'Yoigo'},
'346236':{'en': 'Altecom'},
'34625':{'en': 'Orange'},
'3462529':{'en': 'Yoigo'},
'34626':{'en': 'Movistar'},
'34627':{'en': 'Vodafone'},
'34628':{'en': 'Movistar'},
'34629':{'en': 'Movistar'},
'34630':{'en': 'Movistar'},
'34631':{'en': 'Lycamobile'},
'34632':{'en': 'Lycamobile'},
'34633':{'en': 'Yoigo'},
'34634':{'en': 'Vodafone'},
'346340':{'en': 'Lebara'},
'346341':{'en': 'Lebara'},
'346343':{'en': 'Carrier Enabler'},
'346345':{'en': 'Movistar'},
'34635':{'en': 'Orange'},
'3463529':{'en': 'Yoigo'},
'34636':{'en': 'Movistar'},
'34637':{'en': 'Vodafone'},
'34638':{'en': 'Movistar'},
'34639':{'en': 'Movistar'},
'34640':{'en': 'Orange'},
'34641':{'en': 'Movistar'},
'34642':{'en': 'DigiMobil'},
'346430':{'en': 'DigiMobil'},
'346431':{'en': 'DigiMobil'},
'346432':{'en': 'DigiMobil'},
'346433':{'en': 'DigiMobil'},
'346434':{'en': 'DigiMobil'},
'346435':{'en': 'DigiMobil'},
'346436':{'en': 'DigiMobil'},
'346437':{'en': 'DigiMobil'},
'34644':{'en': 'Orange'},
'34645':{'en': 'Orange'},
'3464529':{'en': 'Yoigo'},
'34646':{'en': 'Movistar'},
'34647':{'en': 'Vodafone'},
'34648':{'en': 'Movistar'},
'34649':{'en': 'Movistar'},
'3465':{'en': 'Orange'},
'34650':{'en': 'Movistar'},
'3465229':{'en': 'Yoigo'},
'3465329':{'en': 'DIA'},
'3465429':{'en': 'DIA'},
'3465529':{'en': 'DIA'},
'3465729':{'en': 'DIA'},
'3465829':{'en': 'DIA'},
'34659':{'en': 'Movistar'},
'34660':{'en': 'Movistar'},
'34661':{'en': 'Vodafone'},
'34662':{'en': 'Vodafone'},
'34663':{'en': 'Vodafone'},
'34664':{'en': 'Vodafone'},
'34665':{'en': 'Orange'},
'34666':{'en': 'Vodafone'},
'34667':{'en': 'Vodafone'},
'346681':{'en': 'Truphone'},
'346685':{'en': 'Orange'},
'346686':{'en': 'Parlem'},
'346688':{'en': 'Parlem'},
'34669':{'en': 'Movistar'},
'3467':{'en': 'Vodafone'},
'346725':{'en': 'Lebara'},
'346728':{'en': 'Lebara'},
'346729':{'en': 'Lebara'},
'34675':{'en': 'Orange'},
'34676':{'en': 'Movistar'},
'34679':{'en': 'Movistar'},
'34680':{'en': 'Movistar'},
'346810':{'en': 'Movistar'},
'346811':{'en': 'Movistar'},
'346812':{'en': 'Movistar'},
'346813':{'en': 'Movistar'},
'346814':{'en': 'Movistar'},
'346815':{'en': 'Movistar'},
'346816':{'en': 'Yoigo'},
'34682':{'en': 'Movistar'},
'34683':{'en': 'Movistar'},
'346840':{'en': 'Movistar'},
'346841':{'en': 'Movistar'},
'346842':{'en': 'Movistar'},
'346843':{'en': 'Movistar'},
'3468440':{'en': 'Eurona'},
'3468441':{'en': 'Lemonvil'},
'3468442':{'en': 'BluePhone'},
'3468443':{'en': 'BT'},
'3468444':{'en': 'BT'},
'3468445':{'en': 'Aire Networks'},
'3468447':{'en': 'Quattre'},
'3468448':{'en': 'Nethits'},
'346845':{'en': 'Movistar'},
'346846':{'en': 'Telecable'},
'34685':{'en': 'Orange'},
'3468529':{'en': 'Carrefour'},
'34686':{'en': 'Movistar'},
'34687':{'en': 'Vodafone'},
'346880':{'en': 'YouMobile'},
'346881':{'en': 'YouMobile'},
'346882':{'en': 'Yoigo'},
'346883':{'en': 'Yoigo'},
'346884':{'en': 'Yoigo'},
'346885':{'en': 'YouMobile'},
'346886':{'en': 'Euskaltel'},
'346887':{'en': 'Euskaltel'},
'3468870':{'en': 'OpenMovil'},
'346888':{'en': 'Euskaltel'},
'3468883':{'en': 'Sarenet'},
'346889':{'en': 'PepePhone'},
'34689':{'en': 'Movistar'},
'34690':{'en': 'Movistar'},
'34691':{'en': 'Orange'},
'346919':{'en': 'Yoigo'},
'3469190':{'en': 'MasMovil'},
'3469198':{'en': 'Carrefour'},
'3469199':{'en': 'Carrefour'},
'34692':{'en': 'Orange'},
'3469229':{'en': 'Carrefour'},
'346927':{'en': 'Carrefour'},
'3469300':{'en': 'MasMovil'},
'3469301':{'en': 'Yoigo'},
'3469302':{'en': 'Yoigo'},
'3469303':{'en': 'Yoigo'},
'3469304':{'en': 'Yoigo'},
'3469305':{'en': 'Yoigo'},
'3469306':{'en': 'Yoigo'},
'346931':{'en': 'Orange'},
'3469310':{'en': 'MasMovil'},
'346932':{'en': 'Yoigo'},
'3469320':{'en': 'Carrefour'},
'3469321':{'en': 'Carrefour'},
'3469329':{'en': 'Orange'},
'346933':{'en': 'Carrefour'},
'3469336':{'en': 'Yoigo'},
'3469337':{'en': 'Yoigo'},
'3469340':{'en': 'DIA'},
'3469341':{'en': 'DIA'},
'3469342':{'en': 'DIA'},
'3469343':{'en': 'DIA'},
'3469344':{'en': 'DIA'},
'3469345':{'en': 'Yoigo'},
'3469346':{'en': 'Yoigo'},
'3469347':{'en': 'Yoigo'},
'3469348':{'en': 'Yoigo'},
'3469349':{'en': 'Yoigo'},
'346935':{'en': 'Yoigo'},
'3469360':{'en': 'DIA'},
'3469361':{'en': 'DIA'},
'3469362':{'en': 'DIA'},
'3469363':{'en': 'DIA'},
'3469364':{'en': 'DIA'},
'3469365':{'en': 'Carrefour'},
'3469366':{'en': 'Carrefour'},
'3469367':{'en': 'Yoigo'},
'3469368':{'en': 'Yoigo'},
'3469369':{'en': 'Yoigo'},
'346937':{'en': 'Yoigo'},
'346938':{'en': 'Yoigo'},
'346939':{'en': 'Yoigo'},
'34694':{'en': 'Movistar'},
'346944':{'en': 'Yoigo'},
'346945':{'en': 'Yoigo'},
'346946':{'en': 'Yoigo'},
'34695':{'en': 'Orange'},
'34696':{'en': 'Movistar'},
'34697':{'en': 'Vodafone'},
'34698':{'en': 'Yoigo'},
'346981':{'en': 'R'},
'346989':{'en': 'Vodafone'},
'34699':{'en': 'Movistar'},
'347110':{'en': 'Zinnia'},
'347111':{'en': 'Vodafone'},
'347117':{'en': 'Vodafone'},
'347121':{'en': 'Yoigo'},
'347122':{'en': 'Yoigo'},
'347123':{'en': 'Yoigo'},
'347124':{'en': 'Yoigo'},
'347125':{'en': 'Yoigo'},
'347126':{'en': 'Yoigo'},
'347127':{'en': 'Yoigo'},
'347128':{'en': 'Yoigo'},
'347170':{'en': 'Movistar'},
'347171':{'en': 'Vodafone'},
'347177':{'en': 'Movistar'},
'3471770':{'en': 'PepePhone'},
'3471771':{'en': 'PepePhone'},
'3471777':{'en': 'PepePhone'},
'347221':{'en': 'Yoigo'},
'347222':{'en': 'Yoigo'},
'347223':{'en': 'Yoigo'},
'347224':{'en': 'Yoigo'},
'347225':{'en': 'Yoigo'},
'347226':{'en': 'Yoigo'},
'3472260':{'en': 'MasMovil'},
'3472261':{'en': 'PepePhone'},
'347227':{'en': 'Yoigo'},
'347228':{'en': 'Yoigo'},
'347277':{'en': 'Vodafone'},
'3474442':{'en': 'Deion'},
'3474443':{'en': 'InfoVOIP'},
'3474447':{'en': 'Jetnet'},
'3474448':{'en': 'Aire Networks'},
'3474449':{'en': 'Alai'},
'347446':{'en': 'PTV'},
'347477':{'en': 'Orange'},
'347478':{'en': 'Orange'},
'3505':{'en': 'GibTel'},
'35060':{'en': 'GibTel'},
'35062':{'en': 'Limba'},
'351609':{'en': 'NOS'},
'35163':{'en': 'NOS'},
'35165':{'en': 'NOS'},
'35166':{'en': 'NOS'},
'35191':{'en': 'Vodafone'},
'3519200':{'en': 'Lycamobile'},
'3519201':{'en': 'Lycamobile'},
'3519202':{'en': 'Lycamobile'},
'3519203':{'en': 'Lycamobile'},
'3519204':{'en': 'Lycamobile'},
'3519205':{'en': 'Lycamobile'},
'351921':{'en': 'Vodafone'},
'3519220':{'en': 'Vodafone'},
'3519221':{'en': 'MEO'},
'3519222':{'en': 'MEO'},
'3519230':{'en': 'NOS'},
'3519231':{'en': 'NOS'},
'3519232':{'en': 'NOS'},
'3519233':{'en': 'NOS'},
'3519234':{'en': 'NOS'},
'3519240':{'en': 'MEO'},
'3519241':{'en': 'MEO'},
'3519242':{'en': 'MEO'},
'3519243':{'en': 'MEO'},
'3519244':{'en': 'MEO'},
'351925':{'en': 'MEO'},
'351926':{'en': 'MEO'},
'351927':{'en': 'MEO'},
'3519280':{'en': 'NOWO'},
'3519281':{'en': 'NOWO'},
'3519285':{'en': 'ONITELECOM'},
'3519290':{'en': 'NOS'},
'3519291':{'en': 'NOS'},
'3519292':{'en': 'NOS'},
'3519293':{'en': 'NOS'},
'3519294':{'en': 'NOS'},
'35193':{'en': 'NOS'},
'35196':{'en': 'MEO'},
'35262':{'en': 'POST'},
'352651':{'en': 'POST'},
'352658':{'en': 'POST'},
'35266':{'en': 'Orange'},
'352671':{'en': 'JOIN'},
'352678':{'en': 'JOIN'},
'35269':{'en': 'Tango'},
'35383':{'en': '3'},
'35385':{'en': 'Meteor'},
'35386':{'en': 'O2'},
'35387':{'en': 'Vodafone'},
'35388':{'en': 'eMobile'},
'35389':{'en': 'Tesco Mobile'},
'3538900':{'en': 'Eircom'},
'353892':{'en': 'Liffey Telecom'},
'353894':{'en': 'Liffey Telecom'},
'353895':{'en': '3'},
'3538960':{'en': 'Virgin Media'},
'3538961':{'en': 'Virgin Media'},
'3538962':{'en': 'Virgin Media'},
'3538970':{'en': 'Carphone Warehouse Ireland Mobile Limited'},
'3538971':{'en': 'Carphone Warehouse Ireland Mobile Limited'},
'3538994':{'en': 'Lycamobile'},
'3538995':{'en': 'Lycamobile'},
'3538996':{'en': 'Lycamobile'},
'3538997':{'en': 'Lycamobile'},
'3538998':{'en': 'Lycamobile'},
'354385':{'en': u('S\u00edminn')},
'354388':{'en': 'IMC'},
'354389':{'en': 'IMC'},
'35461':{'en': 'Vodafone'},
'35462':{'en': 'Vodafone'},
'354630':{'en': 'IMC'},
'354632':{'en': 'Tismi'},
'354637':{'en': u('\u00d6ryggisfjarskipti')},
'354638':{'en': u('\u00d6ryggisfjarskipti')},
'354639':{'en': u('\u00d6ryggisfjarskipti')},
'354640':{'en': u('\u00d6ryggisfjarskipti')},
'354641':{'en': u('\u00d6ryggisfjarskipti')},
'354644':{'en': 'Nova'},
'354646':{'en': 'IMC'},
'354647':{'en': 'IMC'},
'354649':{'en': 'Vodafone'},
'354650':{'en': 'IMC'},
'354651':{'en': 'IMC'},
'354655':{'en': 'Vodafone'},
'354659':{'en': 'Vodafone'},
'35466':{'en': 'Vodafone'},
'35467':{'en': 'Vodafone'},
'354680':{'en': 'Vodafone'},
'354686':{'en': 'Vodafone'},
'354687':{'en': 'Vodafone'},
'354688':{'en': 'Vodafone'},
'35469':{'en': 'Vodafone'},
'354750':{'en': u('S\u00edminn')},
'354755':{'en': u('S\u00edminn')},
'354757':{'en': 'Vodafone'},
'35476':{'en': 'Nova'},
'35477':{'en': 'Nova'},
'35478':{'en': 'Nova'},
'35479':{'en': 'Nova'},
'35482':{'en': 'Vodafone'},
'35483':{'en': u('S\u00edminn')},
'35484':{'en': u('S\u00edminn')},
'35485':{'en': u('S\u00edminn')},
'35486':{'en': u('S\u00edminn')},
'354882':{'en': u('S\u00edminn')},
'354888':{'en': u('S\u00edminn')},
'35489':{'en': u('S\u00edminn')},
'35567':{'en': 'ALBtelecom'},
'35568':{'en': 'Telekom'},
'35569':{'en': 'Vodafone'},
'35672':{'en': 'GO Mobile'},
'35677':{'en': 'Melita Mobile'},
'35679':{'en': 'GO Mobile'},
'35692':{'en': 'Vodafone'},
'35696':{'en': 'YOM'},
'356981':{'en': 'Melita Mobile'},
'356988':{'en': 'GO Mobile'},
'356989':{'en': 'Vodafone'},
'35699':{'en': 'Vodafone'},
'35794':{'en': 'Lemontel'},
'35795':{'en': 'PrimeTel'},
'35796':{'en': 'MTN'},
'35797':{'en': 'Cytamobile-Vodafone'},
'35799':{'en': 'Cytamobile-Vodafone'},
'35840':{'en': 'Telia'},
'35841':{'en': 'DNA'},
'35842':{'en': 'Telia'},
'3584320':{'en': 'Cuuma'},
'3584321':{'en': 'Cuuma'},
'3584322':{'en': 'Benemen Oy'},
'3584323':{'en': 'Top Connect OU'},
'3584324':{'en': 'Nord Connect SIA'},
'358436':{'en': 'DNA'},
'358438':{'en': 'DNA'},
'35844':{'en': 'DNA'},
'358450':{'en': 'Telia'},
'358451':{'en': 'Elisa'},
'358452':{'en': 'Elisa'},
'358453':{'en': 'Elisa'},
'3584540':{'en': 'MobiWeb'},
'3584541':{'en': 'AinaCom'},
'3584542':{'en': 'Nokia'},
'3584543':{'en': 'Nokia'},
'3584544':{'en': 'Nokia'},
'3584545':{'en': 'Interactive Digital Media'},
'3584546':{'en': 'NextGen Mobile / CardBoardFish'},
'3584547':{'en': 'SMS Provider Corp'},
'3584548':{'en': 'Voxbone'},
'3584549':{'en': 'Beepsend'},
'3584550':{'en': 'Suomen Virveverkko'},
'3584552':{'en': 'Suomen Virveverkko'},
'3584554':{'en': 'Suomen Virveverkko'},
'3584555':{'en': 'Nokia Solutions and Networks'},
'3584556':{'en': 'Liikennevirasto'},
'3584557':{'en': 'Compatel'},
'3584558':{'en': 'Suomen Virveverkko'},
'3584559':{'en': 'MI'},
'358456':{'en': 'Elisa'},
'3584570':{'en': 'AMT'},
'3584571':{'en': 'Tismi'},
'3584572':{'en': 'Telavox AB'},
'3584573':{'en': 'AMT'},
'3584574':{'en': 'DNA'},
'3584575':{'en': 'AMT'},
'3584576':{'en': 'DNA'},
'3584577':{'en': 'DNA'},
'3584578':{'en': 'DNA'},
'3584579':{'en': 'DNA'},
'358458':{'en': 'Elisa'},
'35846':{'en': 'Elisa'},
'35850':{'en': 'Elisa'},
'35987':{'en': 'Vivacom'},
'35988':{'en': 'A1'},
'35989':{'en': 'Telenor'},
'359988':{'en': 'Bob'},
'359989':{'en': 'A1'},
'359996':{'en': 'Bulsatcom'},
'359999':{'en': 'MAX'},
'3620':{'en': 'Telenor'},
'3630':{'en': 'Magyar Telekom'},
'36312000':{'en': 'Netfone Telecom'},
'36312001':{'en': 'Netfone Telecom'},
'3631310':{'en': 'Vodafone'},
'3631311':{'en': 'Vodafone'},
'3631312':{'en': 'Vodafone'},
'3631313':{'en': 'Vodafone'},
'3631314':{'en': 'Vodafone'},
'3631315':{'en': 'Vodafone'},
'3631316':{'en': 'Vodafone'},
'3631317':{'en': 'Vodafone'},
'3631318':{'en': 'Vodafone'},
'36313190':{'en': 'Vodafone'},
'36313191':{'en': 'Vodafone'},
'36313192':{'en': 'Vodafone'},
'36313193':{'en': 'Vodafone'},
'36313194':{'en': 'Vodafone'},
'36313195':{'en': 'Vodafone'},
'36313196':{'en': 'Vodafone'},
'36313197':{'en': 'Vodafone'},
'36313199':{'en': 'Vodafone'},
'3631320':{'en': 'Vodafone'},
'3631321':{'en': 'Vodafone'},
'3631322':{'en': 'Vodafone'},
'3631323':{'en': 'Vodafone'},
'3631324':{'en': 'Vodafone'},
'3631325':{'en': 'Vodafone'},
'3631326':{'en': 'Vodafone'},
'3631327':{'en': 'Vodafone'},
'3631328':{'en': 'Vodafone'},
'36313290':{'en': 'Vodafone'},
'36313291':{'en': 'Vodafone'},
'36313292':{'en': 'Vodafone'},
'3631330':{'en': 'Vodafone'},
'3631331':{'en': 'Vodafone'},
'3631332':{'en': 'Vodafone'},
'36313330':{'en': 'Vidanet'},
'36313331':{'en': 'Vidanet'},
'36313666':{'en': 'Vodafone'},
'36317000':{'en': 'TARR'},
'36317001':{'en': 'TARR'},
'36317002':{'en': 'TARR'},
'36317003':{'en': 'TARR'},
'36317004':{'en': 'TARR'},
'3631770':{'en': 'UPC'},
'3631771':{'en': 'UPC'},
'363178':{'en': 'UPC'},
'3631790':{'en': 'UPC'},
'36501':{'en': 'DIGI'},
'36502':{'en': 'DIGI'},
'3670':{'en': 'Vodafone'},
'37060':{'en': 'Tele 2'},
'37061':{'en': 'Omnitel'},
'37062':{'en': 'Omnitel'},
'37063':{'en': u('BIT\u00c4')},
'37064':{'en': u('BIT\u00c4')},
'370645':{'en': 'Tele 2'},
'370646':{'en': 'Tele 2'},
'370647':{'en': 'Tele 2'},
'370648':{'en': 'Tele 2'},
'37065':{'en': u('BIT\u00c4')},
'370660':{'en': u('BIT\u00c4')},
'370661':{'en': u('BIT\u00c4')},
'3706610':{'en': 'Tele 2'},
'370662':{'en': 'Omnitel'},
'37066313':{'en': u('BIT\u00c4')},
'37066314':{'en': u('BIT\u00c4')},
'37066315':{'en': u('BIT\u00c4')},
'37066316':{'en': u('BIT\u00c4')},
'37066317':{'en': u('BIT\u00c4')},
'37066318':{'en': u('BIT\u00c4')},
'37066319':{'en': u('BIT\u00c4')},
'37066320':{'en': u('BIT\u00c4')},
'37066323':{'en': u('BIT\u00c4')},
'37066522':{'en': u('BIT\u00c4')},
'3706660':{'en': u('BIT\u00c4')},
'3706661':{'en': u('BIT\u00c4')},
'37066622':{'en': u('BIT\u00c4')},
'37066623':{'en': u('BIT\u00c4')},
'37066624':{'en': u('BIT\u00c4')},
'37066625':{'en': u('BIT\u00c4')},
'37066626':{'en': u('BIT\u00c4')},
'37066627':{'en': u('BIT\u00c4')},
'37066628':{'en': u('BIT\u00c4')},
'37066629':{'en': u('BIT\u00c4')},
'3706665':{'en': u('BIT\u00c4')},
'3706666':{'en': 'Tele 2'},
'3706667':{'en': u('BIT\u00c4')},
'3706668':{'en': u('BIT\u00c4')},
'3706669':{'en': u('BIT\u00c4')},
'3706670':{'en': u('BIT\u00c4')},
'37066711':{'en': u('BIT\u00c4')},
'37066719':{'en': u('BIT\u00c4')},
'37066728':{'en': u('BIT\u00c4')},
'37066729':{'en': u('BIT\u00c4')},
'3706676':{'en': u('BIT\u00c4')},
'3706677':{'en': u('BIT\u00c4')},
'3706678':{'en': u('BIT\u00c4')},
'3706679':{'en': u('BIT\u00c4')},
'3706680':{'en': 'Tele 2'},
'37066839':{'en': 'Tele 2'},
'37066840':{'en': 'Tele 2'},
'37066841':{'en': 'Tele 2'},
'37066842':{'en': 'Tele 2'},
'37066860':{'en': 'Tele 2'},
'37066861':{'en': 'Tele 2'},
'37066862':{'en': 'Tele 2'},
'37066863':{'en': 'Tele 2'},
'37066864':{'en': 'Tele 2'},
'37066865':{'en': 'Tele 2'},
'37066876':{'en': u('BIT\u00c4')},
'37066877':{'en': u('BIT\u00c4')},
'37066900':{'en': u('BIT\u00c4')},
'3706696':{'en': u('BIT\u00c4')},
'3706697':{'en': u('BIT\u00c4')},
'3706698':{'en': u('BIT\u00c4')},
'3706699':{'en': u('BIT\u00c4')},
'37067':{'en': 'Tele 2'},
'370680':{'en': 'Omnitel'},
'370681':{'en': u('BIT\u00c4')},
'370682':{'en': 'Omnitel'},
'370683':{'en': 'Tele 2'},
'370684':{'en': 'Tele 2'},
'370685':{'en': u('BIT\u00c4')},
'370686':{'en': 'Omnitel'},
'370687':{'en': 'Omnitel'},
'370688':{'en': 'Omnitel'},
'370689':{'en': u('BIT\u00c4')},
'370690':{'en': u('BIT\u00c4')},
'370691':{'en': u('BIT\u00c4')},
'370692':{'en': 'Omnitel'},
'370693':{'en': 'Omnitel'},
'370694':{'en': 'Omnitel'},
'370695':{'en': 'Omnitel'},
'370696':{'en': 'Omnitel'},
'37069742':{'en': u('BIT\u00c4')},
'37069743':{'en': u('BIT\u00c4')},
'370698':{'en': 'Omnitel'},
'370699':{'en': u('BIT\u00c4')},
'37250':{'en': 'Telia Eesti AS'},
'372519':{'en': 'Telia Eesti AS'},
'37252':{'en': 'Telia Eesti AS'},
'372530':{'en': 'Telia Eesti AS'},
'372533':{'en': 'Telia Eesti AS'},
'372534':{'en': 'Telia Eesti AS'},
'372536':{'en': 'Telia Eesti AS'},
'372537':{'en': 'Telia Eesti AS'},
'372538':{'en': 'Telia Eesti AS'},
'372539':{'en': 'Telia Eesti AS'},
'37254':{'en': 'Telia Eesti AS'},
'372545':{'en': 'Elisa'},
'3725461':{'en': 'Elisa'},
'3725462':{'en': 'Elisa'},
'3725463':{'en': 'Elisa'},
'37254664':{'en': 'Elisa'},
'37254665':{'en': 'Elisa'},
'37254667':{'en': 'Elisa'},
'37254668':{'en': 'Elisa'},
'37254669':{'en': 'Elisa'},
'37255':{'en': 'Tele 2'},
'37256':{'en': 'Elisa'},
'37257':{'en': 'Telia Eesti AS'},
'37258':{'en': 'Tele 2'},
'372589':{'en': 'Elisa'},
'37259':{'en': 'Telia Eesti AS'},
'37259120':{'en': 'Tele 2'},
'37259121':{'en': 'Tele 2'},
'37259140':{'en': 'Tele 2'},
'372591410':{'en': 'Tele 2'},
'372591411':{'en': 'Tele 2'},
'372591412':{'en': 'Tele 2'},
'372591413':{'en': 'Tele 2'},
'37259144':{'en': 'Tele 2'},
'37281':{'en': 'Telia Eesti AS'},
'3728110':{'en': 'Tele 2'},
'3728111':{'en': 'Elisa'},
'37282':{'en': 'Elisa'},
'3728200':{'en': 'Telia Eesti AS'},
'3728204':{'en': 'Tele 2'},
'37282056':{'en': 'Tele 2'},
'37282057':{'en': 'Tele 2'},
'37282058':{'en': 'Tele 2'},
'37282059':{'en': 'Tele 2'},
'3728206':{'en': 'Tele 2'},
'3728216':{'en': 'Tele 2'},
'3728217':{'en': 'Tele 2'},
'3728218':{'en': 'Tele 2'},
'37282199':{'en': 'Tele 2'},
'3728282':{'en': 'Telia Eesti AS'},
'37283':{'en': 'Tele 2'},
'37284':{'en': 'Tele 2'},
'37284510':{'en': 'Telia Eesti AS'},
'37284511':{'en': 'Telia Eesti AS'},
'37284512':{'en': 'Telia Eesti AS'},
'37356':{'en': 'IDC'},
'37360':{'en': 'Orange'},
'373610':{'en': 'Orange'},
'373611':{'en': 'Orange'},
'373620':{'en': 'Orange'},
'373621':{'en': 'Orange'},
'37367':{'en': 'Moldtelecom'},
'37368':{'en': 'Orange'},
'37369':{'en': 'Orange'},
'37376':{'en': 'Moldcell'},
'373774':{'en': 'IDC'},
'373775':{'en': 'IDC'},
'373777':{'en': 'IDC'},
'373778':{'en': 'IDC'},
'373779':{'en': 'IDC'},
'37378':{'en': 'Moldcell'},
'37379':{'en': 'Moldcell'},
'37433':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'37441':{'en': 'Ucom', 'ru': u('\u042e\u043a\u043e\u043c')},
'37443':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'37444':{'en': 'Ucom', 'ru': u('\u042e\u043a\u043e\u043c')},
'37449':{'en': 'VivaCell-MTS', 'ru': u('\u0412\u0438\u0432\u0430\u0421\u0435\u043b\u043b-\u041c\u0422\u0421')},
'3745':{'en': 'Ucom', 'ru': u('\u042e\u043a\u043e\u043c')},
'3747':{'en': 'VivaCell-MTS', 'ru': u('\u0412\u0438\u0432\u0430\u0421\u0435\u043b\u043b-\u041c\u0422\u0421')},
'37488':{'en': 'VivaCell-MTS', 'ru': u('\u0412\u0438\u0432\u0430\u0421\u0435\u043b\u043b-\u041c\u0422\u0421')},
'37491':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'37493':{'en': 'VivaCell-MTS', 'ru': u('\u0412\u0438\u0432\u0430\u0421\u0435\u043b\u043b-\u041c\u0422\u0421')},
'37494':{'en': 'VivaCell-MTS', 'ru': u('\u0412\u0438\u0432\u0430\u0421\u0435\u043b\u043b-\u041c\u0422\u0421')},
'37495':{'en': 'Ucom', 'ru': u('\u042e\u043a\u043e\u043c')},
'37496':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'37498':{'en': 'VivaCell-MTS', 'ru': u('\u0412\u0438\u0432\u0430\u0421\u0435\u043b\u043b-\u041c\u0422\u0421')},
'37499':{'en': 'Beeline', 'ru': u('\u0411\u0438\u043b\u0430\u0439\u043d')},
'37525':{'be': u('\u0411\u0435\u0421\u0422'), 'en': 'life:)', 'ru': 'life:)'},
'375291':{'be': 'Velcom', 'en': 'Velcom', 'ru': 'Velcom'},
'375292':{'be': u('\u041c\u0422\u0421'), 'en': 'MTS', 'ru': u('\u041c\u0422\u0421')},
'375293':{'be': 'Velcom', 'en': 'Velcom', 'ru': 'Velcom'},
'375294':{'be': u('\u0411\u0435\u043b\u0421\u0435\u043b'), 'en': 'Belcel', 'ru': u('\u0411\u0435\u043b\u0421\u0435\u043b')},
'375295':{'be': u('\u041c\u0422\u0421'), 'en': 'MTS', 'ru': u('\u041c\u0422\u0421')},
'375296':{'be': 'Velcom', 'en': 'Velcom', 'ru': 'Velcom'},
'375297':{'be': u('\u041c\u0422\u0421'), 'en': 'MTS', 'ru': u('\u041c\u0422\u0421')},
'375298':{'be': u('\u041c\u0422\u0421'), 'en': 'MTS', 'ru': u('\u041c\u0422\u0421')},
'375299':{'be': 'Velcom', 'en': 'Velcom', 'ru': 'Velcom'},
'37533':{'be': u('\u041c\u0422\u0421'), 'en': 'MTS', 'ru': u('\u041c\u0422\u0421')},
'37544':{'be': 'Velcom', 'en': 'Velcom', 'ru': 'Velcom'},
'3763':{'en': 'Mobiland'},
'3765':{'en': 'Mobiland'},
'3766':{'en': 'Mobiland'},
'3773':{'en': 'Monaco Telecom'},
'3774':{'en': 'Monaco Telecom'},
'3776':{'en': 'Monaco Telecom'},
'37861':{'en': 'TELENET'},
'37866':{'en': 'Telecom Italia San Marino'},
'38050':{'en': 'Vodafone', 'uk': u('Vodafone \u0423\u043a\u0440\u0430\u0457\u043d\u0430')},
'38063':{'en': 'lifecell', 'uk': 'lifecell'},
'38066':{'en': 'Vodafone', 'uk': u('Vodafone \u0423\u043a\u0440\u0430\u0457\u043d\u0430')},
'38067':{'en': 'Kyivstar', 'uk': u('\u041a\u0438\u0457\u0432\u0441\u0442\u0430\u0440')},
'38068':{'en': 'Kyivstar', 'uk': u('\u041a\u0438\u0457\u0432\u0441\u0442\u0430\u0440')},
'38073':{'en': 'lifecell', 'uk': 'lifecell'},
'38091':{'en': 'TriMob', 'uk': u('\u0422\u0440\u0438\u041c\u043e\u0431')},
'38092':{'en': 'PEOPLEnet', 'uk': 'PEOPLEnet'},
'38093':{'en': 'lifecell', 'uk': 'lifecell'},
'38094':{'en': 'Intertelecom', 'uk': u('\u0406\u043d\u0442\u0435\u0440\u0442\u0435\u043b\u0435\u043a\u043e\u043c')},
'38095':{'en': 'Vodafone', 'uk': u('Vodafone \u0423\u043a\u0440\u0430\u0457\u043d\u0430')},
'38096':{'en': 'Kyivstar', 'uk': u('\u041a\u0438\u0457\u0432\u0441\u0442\u0430\u0440')},
'38097':{'en': 'Kyivstar', 'uk': u('\u041a\u0438\u0457\u0432\u0441\u0442\u0430\u0440')},
'38098':{'en': 'Kyivstar', 'uk': u('\u041a\u0438\u0457\u0432\u0441\u0442\u0430\u0440')},
'38099':{'en': 'Vodafone', 'uk': u('Vodafone \u0423\u043a\u0440\u0430\u0457\u043d\u0430')},
'38160':{'en': 'VIP'},
'38161':{'en': 'VIP'},
'38162':{'en': 'Telenor'},
'38163':{'en': 'Telenor'},
'38164':{'en': 'Telekom Srbija a.d.'},
'38165':{'en': 'Telekom Srbija a.d.'},
'38166':{'en': 'Telekom Srbija a.d.'},
'381677':{'en': 'GLOBALTEL'},
'381678':{'en': 'Vectone Mobile'},
'38168':{'en': 'VIP'},
'38169':{'en': 'Telenor'},
'38260':{'en': 'm:tel'},
'38263':{'en': 'Telenor'},
'38266':{'en': 'Telekom'},
'38267':{'en': 'Telekom'},
'38268':{'en': 'm:tel'},
'38269':{'en': 'Telenor'},
'38343':{'en': 'IPKO'},
'38344':{'en': 'vala'},
'383451':{'en': 'vala'},
'383452':{'en': 'vala'},
'383453':{'en': 'vala'},
'383454':{'en': 'vala'},
'383455':{'en': 'Z Mobile'},
'383456':{'en': 'Z Mobile'},
'383457':{'en': 'vala'},
'383458':{'en': 'vala'},
'383459':{'en': 'vala'},
'383461':{'en': 'Z Mobile'},
'3834710':{'en': 'mts d.o.o.'},
'3834711':{'en': 'mts d.o.o.'},
'3834712':{'en': 'mts d.o.o.'},
'3834713':{'en': 'mts d.o.o.'},
'3834714':{'en': 'mts d.o.o.'},
'3834715':{'en': 'mts d.o.o.'},
'38348':{'en': 'IPKO'},
'38349':{'en': 'IPKO'},
'38590':{'en': 'Tele2'},
'38591':{'en': 'A1 Telekom'},
'38592':{'en': 'A1 Telekom'},
'38595':{'en': 'Tele2'},
'385970':{'en': 'Hrvatski Telekom'},
'385975':{'en': 'Telefocus'},
'385976':{'en': 'Hrvatski Telekom'},
'385977':{'en': 'Hrvatski Telekom'},
'385979':{'en': 'Hrvatski Telekom'},
'38598':{'en': 'Hrvatski Telekom'},
'38599':{'en': 'Hrvatski Telekom'},
'38630':{'en': 'A1'},
'38631':{'en': 'Telekom Slovenije'},
'38640':{'en': 'A1'},
'38641':{'en': 'Telekom Slovenije'},
'38643':{'en': 'Telekom Slovenije'},
'38649':{'en': 'Telekom Slovenije'},
'38651':{'en': 'Telekom Slovenije'},
'38664':{'en': 'T-2'},
'386651':{'en': u('S\u017d - Infrastruktura')},
'386655':{'en': 'Telekom Slovenije'},
'386656':{'en': 'Telekom Slovenije'},
'386657':{'en': 'Novatel'},
'38668':{'en': 'A1'},
'38669':{'en': 'A1'},
'3866910':{'en': 'Compatel'},
'38670':{'en': 'Telemach'},
'38671':{'en': 'Telemach'},
'38760':{'en': 'BH Telecom'},
'38761':{'en': 'BH Telecom'},
'38762':{'en': 'BH Telecom'},
'38763':{'en': 'HT ERONET'},
'38764':{'en': 'HT ERONET'},
'38765':{'en': 'm:tel'},
'38766':{'en': 'm:tel'},
'38767':{'en': 'm:tel'},
'38970':{'en': 'T-Mobile'},
'38971':{'en': 'T-Mobile'},
'38972':{'en': 'T-Mobile'},
'389732':{'en': 'Vip'},
'389733':{'en': 'ALO Telecom'},
'389734':{'en': 'Vip'},
'389742':{'en': 'T-Mobile'},
'3897421':{'en': 'Mobik'},
'389746':{'en': 'T-Mobile'},
'389747':{'en': 'T-Mobile'},
'38975':{'en': 'Vip'},
'38976':{'en': 'Vip'},
'38977':{'en': 'Vip'},
'38978':{'en': 'Vip'},
'38979':{'en': 'Lycamobile'},
'39319':{'en': 'Intermatica'},
'3932':{'en': 'WIND'},
'3933':{'en': 'TIM'},
'3934':{'en': 'Vodafone'},
'3936':{'en': 'TIM'},
'39370':{'en': 'TIM'},
'39373':{'en': '3 Italia'},
'39377':{'en': 'Vodafone'},
'3938':{'en': 'WIND'},
'39383':{'en': 'Vodafone'},
'3939':{'en': '3 Italia'},
'407000':{'en': 'Enigma-System'},
'407013':{'en': 'Lycamobile'},
'407014':{'en': 'Lycamobile'},
'407015':{'en': 'Lycamobile'},
'407016':{'en': 'Lycamobile'},
'407017':{'en': 'Lycamobile'},
'407018':{'en': 'Lycamobile'},
'407019':{'en': 'Lycamobile'},
'40702':{'en': 'Lycamobile'},
'40705':{'en': 'Iristel'},
'40711':{'en': 'Telekom'},
'40712':{'en': '2K Telecom'},
'4072':{'en': 'Vodafone'},
'4073':{'en': 'Vodafone'},
'4074':{'en': 'Orange'},
'4075':{'en': 'Orange'},
'4076':{'en': 'Telekom'},
'40770':{'en': 'Digi Mobil'},
'40771':{'en': 'Digi Mobil'},
'40772':{'en': 'Digi Mobil'},
'40773':{'en': 'Digi Mobil'},
'40774':{'en': 'Digi Mobil'},
'40775':{'en': 'Digi Mobil'},
'40776':{'en': 'Digi Mobil'},
'40777':{'en': 'Digi Mobil'},
'4078':{'en': 'Telekom'},
'4079':{'en': 'Vodafone'},
'417500':{'en': 'Swisscom'},
'41754':{'en': 'Swisscom'},
'417550':{'en': 'Swisscom'},
'417551':{'en': 'Swisscom'},
'417552':{'en': 'Swisscom'},
'417553':{'en': 'Swisscom'},
'417600':{'en': 'Sunrise'},
'41762':{'en': 'Sunrise'},
'41763':{'en': 'Sunrise'},
'41764':{'en': 'Sunrise'},
'41765':{'en': 'Sunrise'},
'41766':{'en': 'Sunrise'},
'41767':{'en': 'Sunrise'},
'41768':{'en': 'Sunrise'},
'41769':{'en': 'Sunrise'},
'41770':{'en': 'Swisscom'},
'417710':{'en': 'Swisscom'},
'417712':{'en': 'Swisscom'},
'417713':{'en': 'Swisscom'},
'417715':{'en': 'Swisscom'},
'41772':{'en': 'Sunrise'},
'417730':{'en': 'Sunrise'},
'4177310':{'en': 'Sunrise'},
'4177311':{'en': 'Sunrise'},
'4177312':{'en': 'Sunrise'},
'4177313':{'en': 'Sunrise'},
'4177314':{'en': 'Sunrise'},
'4177315':{'en': 'Sunrise'},
'4177316':{'en': 'Sunrise'},
'4177357':{'en': 'In&Phone'},
'41774':{'en': 'Swisscom'},
'417750':{'en': 'Swisscom'},
'417751':{'en': 'Swisscom'},
'417752':{'en': 'Swisscom'},
'417753':{'en': 'Swisscom'},
'417780':{'en': 'BeeOne Communications'},
'417781':{'en': 'BeeOne Communications'},
'417788':{'en': 'Vectone Mobile Limited (Mundio)'},
'417789':{'en': 'Vectone Mobile Limited (Mundio)'},
'41779':{'en': 'Lycamobile'},
'41780':{'en': 'Salt'},
'41781':{'en': 'Salt'},
'41782':{'en': 'Salt'},
'41783':{'en': 'Salt'},
'417840':{'en': 'UPC Switzerland'},
'417841':{'en': 'UPC Switzerland'},
'417842':{'en': 'UPC Switzerland'},
'4178490':{'en': 'Telecom26 AG'},
'41785':{'en': 'Salt'},
'41786':{'en': 'Salt'},
'41787':{'en': 'Salt'},
'41788':{'en': 'Salt'},
'41789':{'en': 'Salt'},
'41790':{'en': 'Swisscom'},
'41791':{'en': 'Swisscom'},
'41792':{'en': 'Swisscom'},
'41793':{'en': 'Swisscom'},
'41794':{'en': 'Swisscom'},
'41795':{'en': 'Swisscom'},
'41796':{'en': 'Swisscom'},
'41797':{'en': 'Swisscom'},
'41798':{'en': 'Swisscom'},
'417990':{'en': 'Swisscom'},
'417991':{'en': 'Swisscom'},
'417992':{'en': 'Swisscom'},
'417993':{'en': 'Swisscom'},
'417994':{'en': 'Swisscom'},
'417995':{'en': 'Swisscom'},
'417996':{'en': 'Swisscom'},
'4179977':{'en': 'Relario AG (Bebbicell)'},
'4179978':{'en': 'Relario AG (Bebbicell)'},
'4179979':{'en': 'Relario AG (Bebbicell)'},
'417999':{'en': 'Comfone AG'},
'420601':{'en': 'O2'},
'420602':{'en': 'O2'},
'420603':{'en': 'T-Mobile'},
'420604':{'en': 'T-Mobile'},
'420605':{'en': 'T-Mobile'},
'420606':{'en': 'O2'},
'420607':{'en': 'O2'},
'420608':{'en': 'Vodafone'},
'420702':{'en': 'O2'},
'42070300':{'en': 'T-Mobile'},
'4207031':{'en': 'T-Mobile'},
'4207032':{'en': 'T-Mobile'},
'4207033':{'en': 'T-Mobile'},
'4207034':{'en': 'T-Mobile'},
'4207035':{'en': 'T-Mobile'},
'4207036':{'en': 'T-Mobile'},
'42070370':{'en': 'FAYN Telecommunications'},
'42070373':{'en': 'COMA'},
'4207038':{'en': 'T-Mobile'},
'4207039':{'en': 'T-Mobile'},
'4207040':{'en': 'SAZKA sazkova kancelar, a.s'},
'4207041':{'en': 'SAZKA sazkova kancelar, a.s'},
'4207042':{'en': 'SAZKA sazkova kancelar, a.s'},
'4207043':{'en': 'SAZKA sazkova kancelar, a.s'},
'4207044':{'en': 'SAZKA sazkova kancelar, a.s'},
'4207045':{'en': 'SAZKA sazkova kancelar, a.s'},
'4207047':{'en': 'SAZKA sazkova kancelar, a.s'},
'4207050':{'en': 'O2'},
'4207051':{'en': 'O2'},
'4207052':{'en': 'O2'},
'4207053':{'en': 'O2'},
'4207054':{'en': 'O2'},
'42070570':{'en': 'T-Mobile'},
'42072':{'en': 'O2'},
'4207300':{'en': 'T-Mobile'},
'4207301':{'en': 'T-Mobile'},
'4207302':{'en': 'T-Mobile'},
'42073030':{'en': 'T-Mobile'},
'42073033':{'en': 'Axfone'},
'42073035':{'en': 'MATERNA Communications'},
'42073040':{'en': 'Compatel'},
'42073041':{'en': 'SMART Comp'},
'42073042':{'en': 'SMART Comp'},
'42073043':{'en': 'PODA a.s. (SkyNet)'},
'42073044':{'en': 'Vodafone'},
'42073045':{'en': 'Vodafone'},
'42073046':{'en': 'Vodafone'},
'42073047':{'en': 'Vodafone'},
'42073048':{'en': 'Vodafone'},
'4207305':{'en': 'T-Mobile'},
'4207306':{'en': 'T-Mobile'},
'42073070':{'en': 'T-Mobile'},
'42073072':{'en': 'Amcatel'},
'42073073':{'en': 'T-Mobile'},
'42073077':{'en': 'T-Mobile'},
'4207308':{'en': 'T-Mobile'},
'4207309':{'en': 'T-Mobile'},
'420731':{'en': 'T-Mobile'},
'420732':{'en': 'T-Mobile'},
'420733':{'en': 'T-Mobile'},
'420734':{'en': 'T-Mobile'},
'420735':{'en': 'T-Mobile'},
'420736':{'en': 'T-Mobile'},
'420737':{'en': 'T-Mobile'},
'420738':{'en': 'T-Mobile'},
'420739':{'en': 'T-Mobile'},
'4207700':{'en': 'Vodafone'},
'4207701':{'en': 'Vodafone'},
'4207702':{'en': 'Vodafone'},
'4207703':{'en': 'Vodafone'},
'4207704':{'en': 'Vodafone'},
'42077050':{'en': 'Compatel'},
'42077051':{'en': '3ton s.r.o.'},
'42077052':{'en': '3ton s.r.o.'},
'42077055':{'en': 'ASTELNET'},
'4207706':{'en': 'Vodafone'},
'42077071':{'en': 'Cesky bezdrat'},
'42077072':{'en': 'Cesky bezdrat'},
'42077073':{'en': 'T-Mobile'},
'42077077':{'en': 'T-Mobile'},
'42077080':{'en': 'Vodafone'},
'42077081':{'en': 'Vodafone'},
'42077082':{'en': 'Vodafone'},
'42077083':{'en': 'Vodafone'},
'42077084':{'en': 'Vodafone'},
'42077100':{'en': 'TT Quality s.r.o.'},
'42077111':{'en': 'miniTEL'},
'42077177':{'en': 'MONTYHO TECHNOLOGY s.r.o. (CANISTEC)'},
'42077200':{'en': 'TT Quality s.r.o.'},
'42077272':{'en': 'IPEX'},
'42077273':{'en': 'IPEX'},
'42077277':{'en': 'Dragon Internet'},
'420773':{'en': 'Vodafone'},
'420774':{'en': 'Vodafone'},
'420775':{'en': 'Vodafone'},
'420776':{'en': 'Vodafone'},
'420777':{'en': 'Vodafone'},
'4207780':{'en': 'Vodafone'},
'42077811':{'en': 'Vodafone'},
'42077812':{'en': 'Vodafone'},
'42077813':{'en': 'Vodafone'},
'42077814':{'en': 'Vodafone'},
'42077815':{'en': 'Vodafone'},
'42077816':{'en': 'Vodafone'},
'42077817':{'en': 'Vodafone'},
'42077818':{'en': 'Vodafone'},
'42077819':{'en': 'Vodafone'},
'4207782':{'en': 'Vodafone'},
'4207783':{'en': 'Vodafone'},
'4207784':{'en': 'Vodafone'},
'4207785':{'en': 'Vodafone'},
'4207786':{'en': 'Vodafone'},
'4207787':{'en': 'Vodafone'},
'42077880':{'en': 'ha-vel internet'},
'42077881':{'en': 'Vodafone'},
'42077882':{'en': 'Vodafone'},
'42077883':{'en': 'Vodafone'},
'42077884':{'en': 'Vodafone'},
'42077885':{'en': 'Vodafone'},
'42077886':{'en': 'Vodafone'},
'42077887':{'en': 'Vodafone'},
'42077888':{'en': 'Vodafone'},
'42077889':{'en': 'Vodafone'},
'4207789':{'en': 'Vodafone'},
'42077900':{'en': 'TT Quality s.r.o.'},
'42077977':{'en': 'TT Quality s.r.o.'},
'42077990':{'en': 'ha-vel internet'},
'42077997':{'en': 'Plus4U Mobile s.r.o.'},
'42077999':{'en': 'T-Mobile'},
'42079000':{'en': 'Nordic Telecom s.r.o.(Air Telecom - MobilKom)'},
'42079058':{'en': 'T-Mobile'},
'42079083':{'en': 'T-Mobile'},
'4207910':{'en': 'TRAVEL TELEKOMMUNIKATION'},
'42079191':{'en': 'T-Mobile'},
'42079192':{'en': '3ton s.r.o.'},
'42079193':{'en': 'GOPE Systems a.s.'},
'4207920':{'en': 'O2'},
'4207921':{'en': 'O2'},
'4207922':{'en': 'O2'},
'4207923':{'en': 'O2'},
'42079234':{'en': 'Tesco Mobile CR'},
'42079235':{'en': 'Tesco Mobile CR'},
'42079238':{'en': 'Tesco Mobile CR'},
'42079240':{'en': 'Tesco Mobile CR'},
'42079241':{'en': 'Tesco Mobile CR'},
'42079242':{'en': 'Tesco Mobile CR'},
'42079243':{'en': 'Tesco Mobile CR'},
'42079244':{'en': 'Tesco Mobile CR'},
'42079245':{'en': 'O2'},
'42079246':{'en': 'O2'},
'42079247':{'en': 'O2'},
'42079248':{'en': 'O2'},
'42079249':{'en': 'O2'},
'4207925':{'en': 'O2'},
'42079260':{'en': 'SIA Net Balt'},
'4207927':{'en': 'O2'},
'42079390':{'en': 'T-Mobile'},
'4207940':{'en': 'Vectone Distribution Czech Republic s.r.o(Mundio)'},
'4207950':{'en': 'Vectone Distribution Czech Republic s.r.o(Mundio)'},
'42079750':{'en': 'Dial Telecom'},
'4207976':{'en': 'T-Mobile'},
'42079770':{'en': 'T-Mobile'},
'42079771':{'en': 'T-Mobile'},
'42079772':{'en': 'T-Mobile'},
'42079775':{'en': 'T-Mobile'},
'42079777':{'en': 'T-Mobile'},
'42079779':{'en': 'T-Mobile'},
'4207978':{'en': 'T-Mobile'},
'42079797':{'en': 'T-Mobile'},
'42079799':{'en': 'T-Mobile'},
'42079900':{'en': 'MAXPROGRES'},
'42079910':{'en': 'New Telekom'},
'42079911':{'en': 'New Telekom'},
'42079920':{'en': 'METRONET'},
'42079950':{'en': 'TERMS'},
'42079951':{'en': 'TERMS'},
'42079952':{'en': 'TERMS'},
'42079979':{'en': 'miniTEL'},
'42079999':{'en': 'MAXPROGRES'},
'42093':{'en': 'T-Mobile'},
'420962':{'en': 'O2'},
'420963':{'en': 'T-Mobile'},
'420964':{'en': 'T-Mobile'},
'420965':{'en': 'T-Mobile'},
'420966':{'en': 'O2'},
'420967':{'en': 'Vodafone'},
'421901':{'en': 'T-Mobile (Slovak Telekom)'},
'421902':{'en': 'T-Mobile (Slovak Telekom)'},
'421903':{'en': 'T-Mobile (Slovak Telekom)'},
'421904':{'en': 'T-Mobile (Slovak Telekom)'},
'421905':{'en': 'Orange'},
'421906':{'en': 'Orange'},
'421907':{'en': 'Orange'},
'421908':{'en': 'Orange'},
'4219091':{'en': 'T-Mobile (Slovak Telekom)'},
'4219092':{'en': 'T-Mobile (Slovak Telekom)'},
'4219093':{'en': 'T-Mobile (Slovak Telekom)'},
'4219094':{'en': 'T-Mobile (Slovak Telekom)'},
'4219095':{'en': 'T-Mobile (Slovak Telekom)'},
'4219096':{'en': 'T-Mobile (Slovak Telekom)'},
'4219097':{'en': 'T-Mobile (Slovak Telekom)'},
'4219098':{'en': 'T-Mobile (Slovak Telekom)'},
'4219099':{'en': 'T-Mobile (Slovak Telekom)'},
'421910':{'en': 'T-Mobile (Slovak Telekom)'},
'421911':{'en': 'T-Mobile (Slovak Telekom)'},
'421912':{'en': 'T-Mobile (Slovak Telekom)'},
'421914':{'en': 'T-Mobile (Slovak Telekom)'},
'421915':{'en': 'Orange'},
'421916':{'en': 'Orange'},
'421917':{'en': 'Orange'},
'421918':{'en': 'Orange'},
'421919':{'en': 'Orange'},
'421940':{'en': 'Telefonica O2'},
'42194312':{'en': 'Alternet, s.r.o.'},
'42194333':{'en': 'IPfon, s.r.o.'},
'421944':{'en': 'Telefonica O2'},
'421945':{'en': 'Orange'},
'421947':{'en': 'Telefonica O2'},
'421948':{'en': 'Telefonica O2'},
'421949':{'en': 'Telefonica O2'},
'421950':{'en': '4ka of SWAN'},
'421951':{'en': '4ka of SWAN'},
'4219598':{'en': 'Slovak Republic Railways (GSM-R)'},
'42364':{'en': 'Soracom'},
'423650':{'en': 'Telecom Liechtenstein'},
'423651':{'en': 'Cubic'},
'423652':{'en': 'Cubic'},
'423653':{'en': 'Cubic'},
'423660':{'en': 'Telecom Liechtenstein'},
'423661':{'en': 'Dimoco'},
'4236620':{'en': 'Telecom Liechtenstein'},
'4236626':{'en': 'Datamobile'},
'4236627':{'en': 'Datamobile'},
'4236628':{'en': 'Datamobile'},
'4236629':{'en': 'Datamobile'},
'423663':{'en': 'Emnify'},
'42373':{'en': 'Telecom Liechtenstein'},
'42374':{'en': 'First Mobile'},
'42377':{'en': 'Swisscom'},
'42378':{'en': 'Salt'},
'42379':{'en': 'Telecom Liechtenstein'},
'43650':{'en': 'tele.ring'},
'43660':{'en': 'Hutchison Drei Austria'},
'43664':{'en': 'A1 TA'},
'43676':{'en': 'T-Mobile AT'},
'436770':{'en': 'T-Mobile AT'},
'436771':{'en': 'T-Mobile AT'},
'436772':{'en': 'T-Mobile AT'},
'436778':{'en': 'T-Mobile AT'},
'436779':{'en': 'T-Mobile AT'},
'4368181':{'en': 'A1 TA'},
'4368182':{'en': 'A1 TA'},
'4368183':{'en': 'Orange AT'},
'4368184':{'en': 'A1 TA'},
'43688':{'en': 'Orange AT'},
'43699':{'en': 'Orange AT'},
'447106':{'en': 'O2'},
'447107':{'en': 'O2'},
'447300':{'en': 'EE'},
'447301':{'en': 'EE'},
'447302':{'en': 'EE'},
'447303':{'en': 'EE'},
'447304':{'en': 'EE'},
'447305':{'en': 'Virgin Mobile'},
'447306':{'en': 'Virgin Mobile'},
'447340':{'en': 'Vodafone'},
'447341':{'en': 'Vodafone'},
'447342':{'en': 'Vodafone'},
'447365':{'en': 'Three'},
'447366':{'en': 'Three'},
'447367':{'en': 'Three'},
'4473680':{'en': 'Teleena'},
'4473682':{'en': 'Sky'},
'4473683':{'en': 'Sky'},
'4473684':{'en': 'Sky'},
'4473685':{'en': 'Sky'},
'4473686':{'en': 'Sky'},
'4473699':{'en': 'Anywhere Sim'},
'447375':{'en': 'EE'},
'447376':{'en': 'EE'},
'447377':{'en': 'EE'},
'447378':{'en': 'Three'},
'4473780':{'en': 'Limitless'},
'447379':{'en': 'Vodafone'},
'447380':{'en': 'Three'},
'4473800':{'en': 'AMSUK'},
'447381':{'en': 'O2'},
'447382':{'en': 'O2'},
'447383':{'en': 'Three'},
'447384':{'en': 'Vodafone'},
'447385':{'en': 'Vodafone'},
'447386':{'en': 'Vodafone'},
'447387':{'en': 'Vodafone'},
'447388':{'en': 'Vodafone'},
'4473890':{'en': 'Three'},
'4473891':{'en': 'Three'},
'4473892':{'en': 'TalkTalk'},
'4473893':{'en': 'TalkTalk'},
'4473894':{'en': 'TalkTalk'},
'4473895':{'en': 'TalkTalk'},
'4473896':{'en': 'Hanhaa'},
'4473897':{'en': 'Vodafone'},
'4473898':{'en': 'Vodafone'},
'4473900':{'en': 'Home Office'},
'447391':{'en': 'Vodafone'},
'447392':{'en': 'Vodafone'},
'447393':{'en': 'Vodafone'},
'447394':{'en': 'O2'},
'447395':{'en': 'O2'},
'447396':{'en': 'EE'},
'4473970':{'en': 'Three'},
'4473971':{'en': 'Three'},
'4473972':{'en': 'Three'},
'4473973':{'en': 'Three'},
'4473975':{'en': 'Three'},
'4473976':{'en': 'Three'},
'4473977':{'en': 'Three'},
'4473978':{'en': 'Three'},
'4473979':{'en': 'Three'},
'447398':{'en': 'EE'},
'447399':{'en': 'EE'},
'447400':{'en': 'Three'},
'447401':{'en': 'Three'},
'447402':{'en': 'Three'},
'447403':{'en': 'Three'},
'447404':{'en': 'Lycamobile'},
'447405':{'en': 'Lycamobile'},
'4474060':{'en': 'Cheers'},
'4474061':{'en': 'Cheers'},
'4474062':{'en': 'Cheers'},
'4474065':{'en': 'Telecom2'},
'4474066':{'en': '24 Seven'},
'4474067':{'en': 'TGL'},
'4474068':{'en': '08Direct'},
'4474069':{'en': 'CardBoardFish'},
'447407':{'en': 'Vodafone'},
'4474080':{'en': 'Truphone'},
'4474081':{'en': 'Truphone'},
'4474082':{'en': 'Truphone'},
'4474088':{'en': 'Truphone'},
'4474089':{'en': 'Truphone'},
'447409':{'en': 'Orange'},
'447410':{'en': 'Orange'},
'447411':{'en': 'Three'},
'447412':{'en': 'Three'},
'447413':{'en': 'Three'},
'447414':{'en': 'Three'},
'447415':{'en': 'EE'},
'447416':{'en': 'Orange'},
'4474171':{'en': 'CardBoardFish'},
'4474172':{'en': 'Core Telecom'},
'4474173':{'en': 'Lycamobile'},
'4474174':{'en': 'Lycamobile'},
'4474175':{'en': 'Lycamobile'},
'4474178':{'en': 'Truphone'},
'4474179':{'en': 'Core Telecom'},
'4474180':{'en': 'Three'},
'4474181':{'en': 'Bellingham'},
'4474182':{'en': 'TGL'},
'4474183':{'en': 'Tismi'},
'4474184':{'en': 'Manx Telecom'},
'4474185':{'en': 'Telna'},
'4474186':{'en': 'Ace Call'},
'4474187':{'en': 'Teleena'},
'4474189':{'en': 'Teleena'},
'447419':{'en': 'Orange'},
'447420':{'en': 'Orange'},
'447421':{'en': 'Orange'},
'447422':{'en': 'Orange'},
'447423':{'en': 'Vodafone'},
'447424':{'en': 'Lycamobile'},
'447425':{'en': 'Vodafone'},
'447426':{'en': 'Three'},
'447427':{'en': 'Three'},
'447428':{'en': 'Three'},
'447429':{'en': 'Three'},
'447430':{'en': 'O2'},
'447431':{'en': 'O2'},
'447432':{'en': 'EE'},
'447433':{'en': 'EE'},
'447434':{'en': 'EE'},
'447435':{'en': 'Vodafone'},
'447436':{'en': 'Vodafone'},
'447437':{'en': 'Vodafone'},
'447438':{'en': 'Lycamobile'},
'4474390':{'en': 'TalkTalk'},
'4474391':{'en': 'TalkTalk'},
'4474392':{'en': 'TalkTalk'},
'4474393':{'en': 'TalkTalk'},
'447440':{'en': 'Lycamobile'},
'4474408':{'en': 'Telecoms Cloud'},
'4474409':{'en': 'Cloud9'},
'4474410':{'en': 'Mediatel'},
'4474411':{'en': 'Andrews & Arnold'},
'4474413':{'en': 'Stour Marine'},
'4474414':{'en': 'Tismi'},
'4474415':{'en': 'Synectiv'},
'4474416':{'en': 'Vodafone'},
'4474417':{'en': 'Synectiv'},
'4474418':{'en': 'Core Telecom'},
'4474419':{'en': 'Voxbone'},
'447442':{'en': 'Vodafone'},
'447443':{'en': 'Vodafone'},
'447444':{'en': 'Vodafone'},
'447445':{'en': 'Three'},
'447446':{'en': 'Three'},
'447447':{'en': 'Three'},
'447448':{'en': 'Lycamobile'},
'447449':{'en': 'Three'},
'447450':{'en': 'Three'},
'447451':{'en': 'Vectone Mobile'},
'4474512':{'en': 'Tismi'},
'4474515':{'en': 'Premium O'},
'4474516':{'en': 'UK Broadband'},
'4474517':{'en': 'UK Broadband'},
'447452':{'en': 'Manx Telecom'},
'4474527':{'en': 'Three'},
'4474528':{'en': 'Three'},
'4474529':{'en': 'Three'},
'447453':{'en': 'Three'},
'447454':{'en': 'Three'},
'447455':{'en': 'Three'},
'447456':{'en': 'Three'},
'4474570':{'en': 'Vectone Mobile'},
'4474571':{'en': 'Vectone Mobile'},
'4474572':{'en': 'Marathon Telecom'},
'4474573':{'en': 'Vectone Mobile'},
'4474574':{'en': 'Voicetec'},
'4474575':{'en': 'Vectone Mobile'},
'4474576':{'en': 'Sure'},
'4474577':{'en': 'Spacetel'},
'4474578':{'en': 'CardBoardFish'},
'4474579':{'en': 'CardBoardFish'},
'4474580':{'en': 'Gamma Telecom'},
'4474581':{'en': 'Gamma Telecom'},
'4474582':{'en': 'Premium Routing'},
'4474583':{'en': 'Virgin Mobile'},
'4474584':{'en': 'Airwave'},
'4474585':{'en': 'Marathon Telecom'},
'4474586':{'en': 'Three'},
'4474587':{'en': 'Limitless'},
'4474588':{'en': 'Limitless'},
'4474589':{'en': 'Three'},
'447459':{'en': 'Lycamobile'},
'447460':{'en': 'Three'},
'447461':{'en': 'O2'},
'447462':{'en': 'Three'},
'447463':{'en': 'Three'},
'447464':{'en': 'Vodafone'},
'447465':{'en': 'Three'},
'4474650':{'en': 'Vectone Mobile'},
'4474651':{'en': 'Vectone Mobile'},
'4474653':{'en': 'Compatel'},
'4474655':{'en': 'GlobalReach'},
'447466':{'en': 'Lycamobile'},
'447467':{'en': 'Vodafone'},
'447468':{'en': 'Vodafone'},
'447469':{'en': 'Vodafone'},
'44747':{'en': 'Three'},
'447470':{'en': 'Vodafone'},
'447471':{'en': 'Vodafone'},
'447480':{'en': 'Three'},
'447481':{'en': 'Three'},
'447482':{'en': 'Three'},
'447483':{'en': 'EE'},
'447484':{'en': 'EE'},
'447485':{'en': 'EE'},
'447486':{'en': 'EE'},
'447487':{'en': 'EE'},
'4474880':{'en': 'Fogg'},
'4474881':{'en': 'CESG'},
'4474882':{'en': 'Sky'},
'4474883':{'en': 'Sky'},
'4474884':{'en': 'Three'},
'4474885':{'en': 'Three'},
'4474886':{'en': 'Lanonyx'},
'4474887':{'en': 'Three'},
'4474888':{'en': 'Ziron'},
'4474889':{'en': 'Three'},
'447489':{'en': 'O2'},
'447490':{'en': 'Three'},
'447491':{'en': 'Three'},
'447492':{'en': 'Three'},
'447493':{'en': 'Vodafone'},
'447494':{'en': 'EE'},
'447495':{'en': 'EE'},
'447496':{'en': 'EE'},
'447497':{'en': 'EE'},
'447498':{'en': 'EE'},
'447499':{'en': 'O2'},
'447500':{'en': 'Vodafone'},
'447501':{'en': 'Vodafone'},
'447502':{'en': 'Vodafone'},
'447503':{'en': 'Vodafone'},
'447504':{'en': 'EE'},
'447505':{'en': 'EE'},
'447506':{'en': 'EE'},
'447507':{'en': 'EE'},
'447508':{'en': 'EE'},
'4475090':{'en': 'JT'},
'4475091':{'en': 'JT'},
'4475092':{'en': 'JT'},
'4475093':{'en': 'JT'},
'4475094':{'en': 'JT'},
'4475095':{'en': 'JT'},
'4475096':{'en': 'JT'},
'4475097':{'en': 'JT'},
'44751':{'en': 'O2'},
'4475200':{'en': 'Simwood'},
'4475201':{'en': 'BT OnePhone'},
'4475202':{'en': 'Vectone Mobile'},
'4475204':{'en': 'Core Communication'},
'4475205':{'en': 'Esendex'},
'4475206':{'en': 'Tismi'},
'4475207':{'en': 'aql'},
'447521':{'en': 'O2'},
'447522':{'en': 'O2'},
'447523':{'en': 'O2'},
'447525':{'en': 'O2'},
'447526':{'en': 'O2'},
'447527':{'en': 'Orange'},
'447528':{'en': 'Orange'},
'447529':{'en': 'Orange'},
'447530':{'en': 'Orange'},
'447531':{'en': 'Orange'},
'4475320':{'en': 'Orange'},
'4475321':{'en': 'Orange'},
'4475322':{'en': 'Orange'},
'4475323':{'en': 'Orange'},
'4475324':{'en': 'Orange'},
'4475325':{'en': 'SMSRelay AG'},
'4475326':{'en': 'Three'},
'4475327':{'en': 'Three'},
'4475328':{'en': 'Three'},
'4475329':{'en': 'Mobiweb'},
'447533':{'en': 'Three'},
'447534':{'en': 'EE'},
'447535':{'en': 'EE'},
'447536':{'en': 'Orange'},
'4475370':{'en': 'Wavecrest'},
'4475371':{'en': 'Stour Marine'},
'4475373':{'en': 'Swiftnet'},
'4475374':{'en': 'Vodafone'},
'4475376':{'en': 'Mediatel'},
'4475377':{'en': 'CFL'},
'4475378':{'en': 'Three'},
'4475379':{'en': 'Three'},
'447538':{'en': 'EE'},
'447539':{'en': 'EE'},
'44754':{'en': 'O2'},
'447550':{'en': 'EE'},
'447551':{'en': 'Vodafone'},
'447552':{'en': 'Vodafone'},
'447553':{'en': 'Vodafone'},
'447554':{'en': 'Vodafone'},
'447555':{'en': 'Vodafone'},
'447556':{'en': 'Orange'},
'447557':{'en': 'Vodafone'},
'4475580':{'en': 'Mobile FX Services Ltd'},
'4475588':{'en': 'Cloud9'},
'4475590':{'en': 'Mars'},
'4475591':{'en': 'LegendTel'},
'4475592':{'en': 'IPV6'},
'4475593':{'en': 'Globecom'},
'4475594':{'en': 'Truphone'},
'4475595':{'en': 'Confabulate'},
'4475596':{'en': 'Lleida.net'},
'4475597':{'en': 'Core Telecom'},
'4475598':{'en': 'Nodemax'},
'4475599':{'en': 'Resilient'},
'44756':{'en': 'O2'},
'447570':{'en': 'Vodafone'},
'4475710':{'en': '09 Mobile'},
'4475718':{'en': 'Alliance'},
'447572':{'en': 'EE'},
'447573':{'en': 'EE'},
'447574':{'en': 'EE'},
'447575':{'en': 'Three'},
'447576':{'en': 'Three'},
'447577':{'en': 'Three'},
'447578':{'en': 'Three'},
'447579':{'en': 'Orange'},
'447580':{'en': 'Orange'},
'447581':{'en': 'Orange'},
'447582':{'en': 'Orange'},
'447583':{'en': 'Orange'},
'447584':{'en': 'Vodafone'},
'447585':{'en': 'Vodafone'},
'447586':{'en': 'Vodafone'},
'447587':{'en': 'Vodafone'},
'447588':{'en': 'Three'},
'4475890':{'en': 'Yim Siam'},
'4475891':{'en': 'Oxygen8'},
'4475892':{'en': 'Oxygen8'},
'4475893':{'en': 'Oxygen8'},
'4475894':{'en': 'Vectone Mobile'},
'4475895':{'en': 'Vectone Mobile'},
'4475896':{'en': 'Vectone Mobile'},
'4475897':{'en': 'Vectone Mobile'},
'4475898':{'en': 'Test2date'},
'44759':{'en': 'O2'},
'4476000':{'en': 'Mediatel'},
'4476002':{'en': 'PageOne'},
'4476006':{'en': '24 Seven'},
'4476007':{'en': 'Relax'},
'4476020':{'en': 'O2'},
'4476022':{'en': 'Relax'},
'447623':{'en': 'PageOne'},
'447624':{'en': 'Manx Telecom'},
'4476242':{'en': 'Sure'},
'44762450':{'en': 'BlueWave Communications'},
'44762456':{'en': 'Sure'},
'447625':{'en': 'O2'},
'447626':{'en': 'O2'},
'4476400':{'en': 'Core Telecom'},
'4476401':{'en': 'Telecom2'},
'4476402':{'en': 'FIO Telecom'},
'4476403':{'en': 'PageOne'},
'4476404':{'en': 'PageOne'},
'4476406':{'en': 'PageOne'},
'4476407':{'en': 'PageOne'},
'4476411':{'en': 'Orange'},
'4476433':{'en': 'Yim Siam'},
'4476440':{'en': 'O2'},
'4476441':{'en': 'O2'},
'4476446':{'en': 'Media'},
'4476542':{'en': 'PageOne'},
'4476543':{'en': 'PageOne'},
'4476545':{'en': 'PageOne'},
'4476546':{'en': 'PageOne'},
'4476591':{'en': 'Vodafone'},
'4476592':{'en': 'PageOne'},
'4476593':{'en': 'Vodafone'},
'4476594':{'en': 'Vodafone'},
'4476595':{'en': 'Vodafone'},
'4476596':{'en': 'Vodafone'},
'4476598':{'en': 'Vodafone'},
'4476599':{'en': 'PageOne'},
'4476600':{'en': 'Plus'},
'4476601':{'en': 'PageOne'},
'4476602':{'en': 'PageOne'},
'4476603':{'en': 'PageOne'},
'4476604':{'en': 'PageOne'},
'4476605':{'en': 'PageOne'},
'4476606':{'en': '24 Seven'},
'4476607':{'en': 'Premium O'},
'4476608':{'en': 'Premium O'},
'4476609':{'en': 'Premium O'},
'447661':{'en': 'PageOne'},
'4476620':{'en': 'Premium O'},
'4476633':{'en': 'Syntec'},
'4476636':{'en': 'Relax'},
'4476637':{'en': 'Vodafone'},
'447666':{'en': 'Vodafone'},
'4476660':{'en': '24 Seven'},
'4476669':{'en': 'FIO Telecom'},
'4476690':{'en': 'O2'},
'4476691':{'en': 'O2'},
'4476692':{'en': 'O2'},
'4476693':{'en': 'Confabulate'},
'4476696':{'en': 'Cheers'},
'4476698':{'en': 'O2'},
'4476699':{'en': 'O2'},
'4476770':{'en': '24 Seven'},
'4476772':{'en': 'Relax'},
'4476776':{'en': 'Telsis'},
'4476778':{'en': 'Core Telecom'},
'4476810':{'en': 'PageOne'},
'4476814':{'en': 'PageOne'},
'4476818':{'en': 'PageOne'},
'447693':{'en': 'O2'},
'447699':{'en': 'Vodafone'},
'44770':{'en': 'O2'},
'4477000':{'en': 'Cloud9'},
'4477001':{'en': 'Nationwide Telephone'},
'4477003':{'en': 'Sure'},
'4477007':{'en': 'Sure'},
'4477008':{'en': 'Sure'},
'44771':{'en': 'O2'},
'447717':{'en': 'Vodafone'},
'447720':{'en': 'O2'},
'447721':{'en': 'Vodafone'},
'447722':{'en': 'EE'},
'447723':{'en': 'Three'},
'447724':{'en': 'O2'},
'447725':{'en': 'O2'},
'447726':{'en': 'EE'},
'447727':{'en': 'Three'},
'447728':{'en': 'Three'},
'447729':{'en': 'O2'},
'44773':{'en': 'O2'},
'447733':{'en': 'Vodafone'},
'447735':{'en': 'Three'},
'447737':{'en': 'Three'},
'447740':{'en': 'O2'},
'447741':{'en': 'Vodafone'},
'447742':{'en': 'O2'},
'447743':{'en': 'O2'},
'4477442':{'en': 'Core Communication'},
'4477443':{'en': 'Core Communication'},
'4477444':{'en': 'Core Communication'},
'4477445':{'en': 'Core Communication'},
'4477446':{'en': 'Core Communication'},
'4477447':{'en': 'Core Communication'},
'4477448':{'en': 'Core Communication'},
'4477449':{'en': 'Core Communication'},
'447745':{'en': 'O2'},
'447746':{'en': 'O2'},
'447747':{'en': 'Vodafone'},
'447748':{'en': 'Vodafone'},
'447749':{'en': 'O2'},
'447750':{'en': 'O2'},
'447751':{'en': 'O2'},
'447752':{'en': 'O2'},
'447753':{'en': 'O2'},
'4477530':{'en': 'Airwave'},
'447754':{'en': 'O2'},
'4477552':{'en': 'Core Communication'},
'4477553':{'en': 'Core Communication'},
'4477554':{'en': 'Core Communication'},
'4477555':{'en': 'Core Communication'},
'447756':{'en': 'O2'},
'447757':{'en': 'EE'},
'447758':{'en': 'EE'},
'447759':{'en': 'O2'},
'44776':{'en': 'Vodafone'},
'447761':{'en': 'O2'},
'447762':{'en': 'O2'},
'447763':{'en': 'O2'},
'447764':{'en': 'O2'},
'44777':{'en': 'Vodafone'},
'447772':{'en': 'Orange'},
'447773':{'en': 'Orange'},
'447777':{'en': 'EE'},
'447779':{'en': 'Orange'},
'44778':{'en': 'Vodafone'},
'447781':{'en': 'Sure'},
'447782':{'en': 'Three'},
'447783':{'en': 'O2'},
'447784':{'en': 'O2'},
'447790':{'en': 'Orange'},
'447791':{'en': 'Orange'},
'447792':{'en': 'Orange'},
'447793':{'en': 'O2'},
'447794':{'en': 'Orange'},
'447795':{'en': 'Vodafone'},
'447796':{'en': 'Vodafone'},
'447797':{'en': 'JT'},
'447798':{'en': 'Vodafone'},
'447799':{'en': 'Vodafone'},
'447800':{'en': 'Orange'},
'447801':{'en': 'O2'},
'447802':{'en': 'O2'},
'447803':{'en': 'O2'},
'447804':{'en': 'EE'},
'447805':{'en': 'Orange'},
'447806':{'en': 'EE'},
'447807':{'en': 'Orange'},
'447808':{'en': 'O2'},
'447809':{'en': 'O2'},
'44781':{'en': 'Orange'},
'447810':{'en': 'Vodafone'},
'447818':{'en': 'Vodafone'},
'447819':{'en': 'O2'},
'447820':{'en': 'O2'},
'447821':{'en': 'O2'},
'4478220':{'en': 'FleXtel'},
'4478221':{'en': 'Swiftnet'},
'4478222':{'en': 'TalkTalk'},
'4478224':{'en': 'aql'},
'4478225':{'en': 'Icron Network'},
'4478226':{'en': 'aql'},
'4478227':{'en': 'Cheers'},
'4478228':{'en': 'Vodafone'},
'4478229':{'en': 'Oxygen8'},
'447823':{'en': 'Vodafone'},
'447824':{'en': 'Vodafone'},
'447825':{'en': 'Vodafone'},
'447826':{'en': 'Vodafone'},
'447827':{'en': 'Vodafone'},
'447828':{'en': 'Three'},
'4478297':{'en': 'Airtel'},
'4478298':{'en': 'Airtel'},
'4478299':{'en': 'Airtel'},
'447830':{'en': 'Three'},
'447831':{'en': 'Vodafone'},
'447832':{'en': 'Three'},
'447833':{'en': 'Vodafone'},
'447834':{'en': 'O2'},
'447835':{'en': 'O2'},
'447836':{'en': 'Vodafone'},
'447837':{'en': 'Orange'},
'447838':{'en': 'Three'},
'4478391':{'en': 'Airtel'},
'4478392':{'en': 'Airtel'},
'4478397':{'en': 'Airtel'},
'4478398':{'en': 'Sure'},
'44784':{'en': 'O2'},
'447846':{'en': 'Three'},
'447847':{'en': 'EE'},
'447848':{'en': 'Three'},
'447850':{'en': 'O2'},
'447851':{'en': 'O2'},
'447852':{'en': 'EE'},
'447853':{'en': 'Three'},
'447854':{'en': 'Orange'},
'447855':{'en': 'Orange'},
'447856':{'en': 'O2'},
'447857':{'en': 'O2'},
'447858':{'en': 'O2'},
'447859':{'en': 'Three'},
'447860':{'en': 'O2'},
'447861':{'en': 'Three'},
'447862':{'en': 'Three'},
'447863':{'en': 'Three'},
'4478640':{'en': 'O2'},
'4478641':{'en': 'O2'},
'4478642':{'en': 'O2'},
'4478643':{'en': 'O2'},
'4478645':{'en': 'O2'},
'4478646':{'en': 'O2'},
'4478647':{'en': 'O2'},
'4478648':{'en': 'O2'},
'4478649':{'en': 'O2'},
'447865':{'en': 'Three'},
'447866':{'en': 'Orange'},
'447867':{'en': 'Vodafone'},
'447868':{'en': 'Three'},
'447869':{'en': 'Three'},
'447870':{'en': 'Orange'},
'447871':{'en': 'O2'},
'447872':{'en': 'O2'},
'4478722':{'en': 'Cloud9'},
'4478727':{'en': 'Telecom 10'},
'447873':{'en': 'O2'},
'4478730':{'en': 'Telesign'},
'4478740':{'en': 'O2'},
'4478741':{'en': 'O2'},
'4478742':{'en': 'O2'},
'4478743':{'en': 'O2'},
'4478744':{'en': 'Citrus'},
'4478746':{'en': 'O2'},
'4478747':{'en': 'O2'},
'4478748':{'en': 'O2'},
'4478749':{'en': 'O2'},
'447875':{'en': 'Orange'},
'447876':{'en': 'Vodafone'},
'447877':{'en': 'Three'},
'447878':{'en': 'Three'},
'447879':{'en': 'Vodafone'},
'447880':{'en': 'Vodafone'},
'447881':{'en': 'Vodafone'},
'447882':{'en': 'Three'},
'447883':{'en': 'Three'},
'447884':{'en': 'Vodafone'},
'447885':{'en': 'O2'},
'447886':{'en': 'Three'},
'447887':{'en': 'Vodafone'},
'447888':{'en': 'Three'},
'447889':{'en': 'O2'},
'447890':{'en': 'Orange'},
'447891':{'en': 'Orange'},
'4478920':{'en': 'HSL'},
'4478921':{'en': 'Vectone Mobile'},
'4478923':{'en': 'O2'},
'4478924':{'en': 'O2'},
'4478925':{'en': 'FleXtel'},
'4478926':{'en': 'O2'},
'4478927':{'en': 'O2'},
'4478928':{'en': 'O2'},
'4478929':{'en': 'O2'},
'4478930':{'en': 'Magrathea'},
'4478931':{'en': '24 Seven'},
'4478932':{'en': 'O2'},
'4478933':{'en': 'Yim Siam'},
'4478934':{'en': 'O2'},
'4478935':{'en': 'O2'},
'4478936':{'en': 'O2'},
'4478937':{'en': 'O2'},
'4478938':{'en': 'aql'},
'4478939':{'en': 'Citrus'},
'447894':{'en': 'O2'},
'447895':{'en': 'O2'},
'447896':{'en': 'Orange'},
'447897':{'en': 'Three'},
'447898':{'en': 'Three'},
'447899':{'en': 'Vodafone'},
'447900':{'en': 'Vodafone'},
'447901':{'en': 'Vodafone'},
'447902':{'en': 'O2'},
'447903':{'en': 'EE'},
'447904':{'en': 'EE'},
'447905':{'en': 'EE'},
'447906':{'en': 'EE'},
'447907':{'en': 'O2'},
'447908':{'en': 'EE'},
'447909':{'en': 'Vodafone'},
'447910':{'en': 'EE'},
'4479110':{'en': 'Marathon Telecom'},
'4479111':{'en': 'JT'},
'4479112':{'en': '24 Seven'},
'4479117':{'en': 'JT'},
'4479118':{'en': '24 Seven'},
'447912':{'en': 'O2'},
'447913':{'en': 'EE'},
'447914':{'en': 'EE'},
'447915':{'en': 'Three'},
'447916':{'en': 'Three'},
'447917':{'en': 'Vodafone'},
'447918':{'en': 'Vodafone'},
'447919':{'en': 'Vodafone'},
'44792':{'en': 'O2'},
'447920':{'en': 'Vodafone'},
'447924':{'en': 'Manx Telecom'},
'4479245':{'en': 'Cloud9'},
'447929':{'en': 'Orange'},
'447930':{'en': 'EE'},
'447931':{'en': 'EE'},
'447932':{'en': 'EE'},
'447933':{'en': 'O2'},
'447934':{'en': 'O2'},
'447935':{'en': 'O2'},
'447936':{'en': 'O2'},
'447937':{'en': 'JT'},
'447938':{'en': 'O2'},
'447939':{'en': 'EE'},
'44794':{'en': 'EE'},
'44795':{'en': 'EE'},
'447955':{'en': 'O2'},
'44796':{'en': 'Orange'},
'447960':{'en': 'EE'},
'447961':{'en': 'EE'},
'447962':{'en': 'EE'},
'447963':{'en': 'EE'},
'447970':{'en': 'Orange'},
'447971':{'en': 'Orange'},
'447972':{'en': 'Orange'},
'447973':{'en': 'Orange'},
'447974':{'en': 'Orange'},
'447975':{'en': 'Orange'},
'447976':{'en': 'Orange'},
'447977':{'en': 'Orange'},
'4479781':{'en': 'QX Telecom'},
'4479782':{'en': 'Cloud9'},
'4479783':{'en': 'Cloud9'},
'4479784':{'en': 'Cheers'},
'4479785':{'en': 'Icron Network'},
'4479786':{'en': 'Oxygen8'},
'4479787':{'en': 'TeleWare'},
'4479788':{'en': 'Truphone'},
'4479789':{'en': 'IV Response'},
'447979':{'en': 'Vodafone'},
'44798':{'en': 'EE'},
'447980':{'en': 'Orange'},
'447988':{'en': 'Three'},
'447989':{'en': 'Orange'},
'447990':{'en': 'Vodafone'},
'447999':{'en': 'O2'},
'45201':{'en': 'tdc'},
'45202':{'en': 'tdc'},
'45203':{'en': 'tdc'},
'45204':{'en': 'tdc'},
'45205':{'en': 'tdc'},
'45206':{'en': 'telenor'},
'45207':{'en': 'telenor'},
'45208':{'en': 'telenor'},
'45209':{'en': 'telenor'},
'45211':{'en': 'tdc'},
'45212':{'en': 'tdc'},
'45213':{'en': 'tdc'},
'45214':{'en': 'tdc'},
'45215':{'en': 'tdc'},
'45216':{'en': 'tdc'},
'45217':{'en': 'tdc'},
'45218':{'en': 'tdc'},
'45219':{'en': 'tdc'},
'45221':{'en': 'telenor'},
'45222':{'en': 'telenor'},
'45223':{'en': 'telenor'},
'45224':{'en': 'telenor'},
'45225':{'en': 'telenor'},
'45226':{'en': 'telenor'},
'45227':{'en': 'telenor'},
'45228':{'en': 'telenor'},
'45229':{'en': 'telenor'},
'45231':{'en': 'tdc'},
'45232':{'en': 'tdc'},
'45233':{'en': 'tdc'},
'45234':{'en': 'tdc'},
'45235':{'en': 'tdc'},
'45236':{'en': 'tdc'},
'45237':{'en': 'tdc'},
'45238':{'en': 'tdc'},
'45239':{'en': 'tdc'},
'452395':{'en': 'telia'},
'45241':{'en': 'tdc'},
'45242':{'en': 'tdc'},
'45243':{'en': 'tdc'},
'45244':{'en': 'tdc'},
'45245':{'en': 'tdc'},
'45246':{'en': 'tdc'},
'45247':{'en': 'tdc'},
'45248':{'en': 'tdc'},
'45249':{'en': 'tdc'},
'45251':{'en': 'telenor'},
'45252':{'en': 'telenor'},
'45253':{'en': 'telenor'},
'45254':{'en': 'telenor'},
'45255':{'en': 'telenor'},
'45256':{'en': 'telenor'},
'45257':{'en': 'telenor'},
'45258':{'en': 'telenor'},
'452590':{'en': 'mi carrier services'},
'452591':{'en': 'link mobile'},
'452592':{'en': 'link mobile'},
'452593':{'en': 'compatel limited'},
'452594':{'en': 'firmafon'},
'452595':{'en': 'link mobile'},
'452596':{'en': 'viptel'},
'452597':{'en': '3'},
'4525980':{'en': 'uni-tel'},
'4525981':{'en': 'mobiweb limited'},
'4525982':{'en': 'jay.net'},
'4525983':{'en': '42 telecom ab'},
'4525984':{'en': 'link mobile'},
'4525985':{'en': '42 telecom ab'},
'4525986':{'en': '42 telecom ab'},
'4525987':{'en': 'netfors unified messaging'},
'4525988':{'en': 'link mobile'},
'4525989':{'en': 'ipnordic'},
'452599':{'en': 'telenor'},
'4526':{'en': 'telia'},
'4527':{'en': 'telia'},
'4528':{'en': 'telia'},
'45291':{'en': 'tdc'},
'45292':{'en': 'tdc'},
'45293':{'en': 'tdc'},
'45294':{'en': 'tdc'},
'45295':{'en': 'tdc'},
'45296':{'en': 'tdc'},
'45297':{'en': 'tdc'},
'45298':{'en': 'tdc'},
'45299':{'en': 'tdc'},
'45301':{'en': 'tdc'},
'45302':{'en': 'tdc'},
'45303':{'en': 'tdc'},
'45304':{'en': 'tdc'},
'45305':{'en': 'tdc'},
'45306':{'en': 'tdc'},
'45307':{'en': 'tdc'},
'45308':{'en': 'tdc'},
'45309':{'en': 'tdc'},
'45311':{'en': '3'},
'45312':{'en': '3'},
'45313':{'en': '3'},
'4531312':{'en': 'mi carrier services'},
'45314':{'en': '3'},
'45315':{'en': '3'},
'45316':{'en': '3'},
'45317':{'en': '3'},
'45318':{'en': 'lycamobile denmark ltd'},
'45319':{'en': 'telenor'},
'45321':{'en': 'telenor'},
'45322':{'en': 'telenor'},
'45323':{'en': 'telenor'},
'45324':{'en': 'telenor'},
'45325':{'en': 'telenor'},
'45326':{'en': 'telenor'},
'45327':{'en': 'telenor'},
'45328':{'en': 'telenor'},
'45329':{'en': 'telenor'},
'45331':{'en': 'telenor'},
'45332':{'en': 'telenor'},
'45333':{'en': 'telenor'},
'45334':{'en': 'telenor'},
'45335':{'en': 'telenor'},
'45336':{'en': 'telenor'},
'45337':{'en': 'telenor'},
'45338':{'en': 'telenor'},
'45339':{'en': 'telenor'},
'45341':{'en': 'telenor'},
'45342':{'en': 'telenor'},
'453434':{'en': 'telenor'},
'45351':{'en': 'telenor'},
'45352':{'en': 'telenor'},
'45353':{'en': 'telenor'},
'45354':{'en': 'telenor'},
'45355':{'en': 'telenor'},
'45356':{'en': 'telenor'},
'45357':{'en': 'telenor'},
'45358':{'en': 'telenor'},
'45359':{'en': 'telenor'},
'45361':{'en': 'telenor'},
'45362':{'en': 'telenor'},
'45363':{'en': 'telenor'},
'45364':{'en': 'telenor'},
'45365':{'en': 'telenor'},
'45366':{'en': 'telenor'},
'45367':{'en': 'telenor'},
'45368':{'en': 'telenor'},
'45369':{'en': 'telenor'},
'45381':{'en': 'telenor'},
'45382':{'en': 'telenor'},
'45383':{'en': 'telenor'},
'45384':{'en': 'telenor'},
'45385':{'en': 'telenor'},
'45386':{'en': 'telenor'},
'45387':{'en': 'telenor'},
'45388':{'en': 'telenor'},
'45389':{'en': 'telenor'},
'45391':{'en': 'telenor'},
'45392':{'en': 'telenor'},
'45393':{'en': 'telenor'},
'45394':{'en': 'telenor'},
'45395':{'en': 'telenor'},
'45396':{'en': 'telenor'},
'45397':{'en': 'telenor'},
'45398':{'en': 'telenor'},
'45399':{'en': 'telenor'},
'45401':{'en': 'tdc'},
'45402':{'en': 'tdc'},
'45403':{'en': 'tdc'},
'45404':{'en': 'tdc'},
'45405':{'en': 'telenor'},
'45406':{'en': 'telenor'},
'45407':{'en': 'telenor'},
'45408':{'en': 'telenor'},
'45409':{'en': 'telenor'},
'45411':{'en': 'telenor'},
'45412':{'en': 'telenor'},
'45413':{'en': 'telenor'},
'45414':{'en': 'telenor'},
'45415':{'en': 'telenor'},
'45416':{'en': 'telenor'},
'45417':{'en': 'telenor'},
'45418':{'en': 'telenor'},
'45419':{'en': 'telenor'},
'45421':{'en': 'telia'},
'45422':{'en': 'telia'},
'45423':{'en': 'telia'},
'45424':{'en': 'telenor'},
'45425':{'en': 'telenor'},
'45426':{'en': 'telenor'},
'45427':{'en': 'telenor'},
'45428':{'en': 'telenor'},
'4542900':{'en': 'telenor'},
'4542901':{'en': 'telenor'},
'4542902':{'en': 'telenor'},
'4542903':{'en': 'telenor'},
'4542904':{'en': 'telenor'},
'4542905':{'en': 'telenor'},
'45429060':{'en': 'telenor'},
'45429061':{'en': 'telenor'},
'45429062':{'en': 'telenor'},
'45429063':{'en': 'telenor'},
'45429064':{'en': 'telenor'},
'45429065':{'en': 'telenor'},
'45429066':{'en': 'telenor'},
'45429067':{'en': 'telenor'},
'45429068':{'en': 'tdc'},
'45429084':{'en': 'tdc'},
'454291':{'en': '3'},
'454292':{'en': '3'},
'454293':{'en': 'cbb mobil'},
'454294':{'en': '3'},
'454295':{'en': '3'},
'454296':{'en': 'telia'},
'454297':{'en': 'telia'},
'454298':{'en': 'telia'},
'454299':{'en': 'telia'},
'45431':{'en': 'telenor'},
'45432':{'en': 'telenor'},
'45433':{'en': 'telenor'},
'45434':{'en': 'telenor'},
'45435':{'en': 'telenor'},
'45436':{'en': 'telenor'},
'45437':{'en': 'telenor'},
'45438':{'en': 'telenor'},
'45439':{'en': 'telenor'},
'45441':{'en': 'telenor'},
'45442':{'en': 'telenor'},
'45443':{'en': 'telenor'},
'45444':{'en': 'telenor'},
'45445':{'en': 'telenor'},
'45446':{'en': 'telenor'},
'45447':{'en': 'telenor'},
'45448':{'en': 'telenor'},
'45449':{'en': 'telenor'},
'45451':{'en': 'telenor'},
'45452':{'en': 'telenor'},
'45453':{'en': 'telenor'},
'45454':{'en': 'telenor'},
'45455':{'en': 'telenor'},
'45456':{'en': 'telenor'},
'45457':{'en': 'telenor'},
'45458':{'en': 'telenor'},
'45459':{'en': 'telenor'},
'45461':{'en': 'telenor'},
'45462':{'en': 'telenor'},
'45463':{'en': 'telenor'},
'45464':{'en': 'telenor'},
'45465':{'en': 'telenor'},
'45466':{'en': 'telenor'},
'45467':{'en': 'telenor'},
'45468':{'en': 'telenor'},
'45469':{'en': 'telenor'},
'45471':{'en': 'telenor'},
'45472':{'en': 'telenor'},
'45473':{'en': 'telenor'},
'45474':{'en': 'telenor'},
'45475':{'en': 'telenor'},
'45476':{'en': 'telenor'},
'45477':{'en': 'telenor'},
'45478':{'en': 'telenor'},
'45479':{'en': 'telenor'},
'45481':{'en': 'telenor'},
'45482':{'en': 'telenor'},
'45483':{'en': 'telenor'},
'45484':{'en': 'telenor'},
'45485':{'en': 'telenor'},
'45486':{'en': 'telenor'},
'45487':{'en': 'telenor'},
'45488':{'en': 'telenor'},
'45489':{'en': 'telenor'},
'4549109':{'en': 'tdc'},
'454911':{'en': 'tdc'},
'454912':{'en': 'tdc'},
'4549130':{'en': 'tdc'},
'4549131':{'en': 'tdc'},
'4549132':{'en': 'tdc'},
'4549133':{'en': 'tdc'},
'4549134':{'en': 'tdc'},
'4549135':{'en': 'tdc'},
'4549136':{'en': 'tdc'},
'4549138':{'en': 'tdc'},
'4549139':{'en': 'tdc'},
'454914':{'en': 'tdc'},
'4549150':{'en': 'tdc'},
'4549151':{'en': 'tdc'},
'4549155':{'en': 'tdc'},
'4549156':{'en': 'tdc'},
'4549157':{'en': 'tdc'},
'4549158':{'en': 'tdc'},
'4549159':{'en': 'tdc'},
'4549160':{'en': 'tdc'},
'4549161':{'en': 'tdc'},
'4549162':{'en': 'tdc'},
'4549163':{'en': 'tdc'},
'4549168':{'en': 'tdc'},
'4549169':{'en': 'tdc'},
'454917':{'en': 'tdc'},
'4549180':{'en': 'tdc'},
'4549181':{'en': 'tdc'},
'4549184':{'en': 'tdc'},
'4549185':{'en': 'tdc'},
'4549187':{'en': 'tdc'},
'4549188':{'en': 'tdc'},
'4549189':{'en': 'tdc'},
'454919':{'en': 'tdc'},
'4549200':{'en': 'tdc'},
'4549201':{'en': 'tdc'},
'4549202':{'en': 'tdc'},
'4549203':{'en': 'tdc'},
'454921':{'en': 'tdc'},
'4549220':{'en': 'tdc'},
'4549221':{'en': 'tdc'},
'4549222':{'en': 'tdc'},
'4549223':{'en': 'tdc'},
'4549224':{'en': 'tdc'},
'4549225':{'en': 'tdc'},
'4549226':{'en': 'tdc'},
'4549250':{'en': 'tdc'},
'4549251':{'en': 'tdc'},
'4549252':{'en': 'tdc'},
'4549253':{'en': 'tdc'},
'4549255':{'en': 'tdc'},
'4549256':{'en': 'tdc'},
'4549258':{'en': 'tdc'},
'4549259':{'en': 'tdc'},
'4549260':{'en': 'tdc'},
'4549261':{'en': 'tdc'},
'4549262':{'en': 'tdc'},
'4549263':{'en': 'tdc'},
'4549264':{'en': 'tdc'},
'4549265':{'en': 'tdc'},
'4549266':{'en': 'tdc'},
'454927':{'en': 'tdc'},
'454928':{'en': 'tdc'},
'4549295':{'en': 'tdc'},
'4549298':{'en': 'tdc'},
'4549299':{'en': 'tdc'},
'45493':{'en': 'telenor'},
'45494':{'en': 'telenor'},
'4549700':{'en': 'tdc'},
'4549701':{'en': 'tdc'},
'4549702':{'en': 'tdc'},
'4549703':{'en': 'tdc'},
'4549704':{'en': 'tdc'},
'4549707':{'en': 'tdc'},
'4549708':{'en': 'tdc'},
'4549709':{'en': 'tdc'},
'454971':{'en': 'tdc'},
'4549750':{'en': 'tdc'},
'4549751':{'en': 'tdc'},
'4549752':{'en': 'tdc'},
'4549753':{'en': 'tdc'},
'4549754':{'en': 'tdc'},
'4549755':{'en': 'tdc'},
'4549758':{'en': 'tdc'},
'4549759':{'en': 'tdc'},
'4549760':{'en': 'tdc'},
'4549761':{'en': 'tdc'},
'4549762':{'en': 'tdc'},
'4549763':{'en': 'tdc'},
'4549765':{'en': 'tdc'},
'4549766':{'en': 'tdc'},
'4549767':{'en': 'tdc'},
'454977':{'en': 'tdc'},
'4549780':{'en': 'tdc'},
'4549789':{'en': 'tdc'},
'45501':{'en': 'telenor'},
'45502':{'en': 'telenor'},
'45503':{'en': 'telenor'},
'45504':{'en': 'telenor'},
'45505':{'en': 'telenor'},
'455060':{'en': 'ipvision'},
'455061':{'en': 'svr technologies (mach connectivity)'},
'455062':{'en': 'cbb mobil'},
'455063':{'en': 'mundio mobile'},
'455064':{'en': 'lycamobile denmark ltd'},
'455065':{'en': 'lebara limited'},
'455066':{'en': 'cbb mobil'},
'455067':{'en': 'cbb mobil'},
'455068':{'en': 'cbb mobil'},
'455069':{'en': '3'},
'45507':{'en': 'telenor'},
'45508':{'en': 'telenor'},
'45509':{'en': 'telenor'},
'4551':{'en': 'tdc'},
'45510':{'en': 'orange'},
'455188':{'en': 'telia'},
'455189':{'en': 'telia'},
'45521':{'en': 'telia'},
'455210':{'en': 'firstcom'},
'455211':{'en': '3'},
'455212':{'en': '3'},
'45522':{'en': 'telia'},
'455220':{'en': 'link mobile'},
'455222':{'en': 'lebara limited'},
'455225':{'en': 'cbb mobil'},
'45523':{'en': 'telia'},
'455230':{'en': 'tdc'},
'455233':{'en': 'cbb mobil'},
'45524':{'en': 'telia'},
'455240':{'en': 'tdc'},
'455242':{'en': 'cbb mobil'},
'455244':{'en': 'cbb mobil'},
'455250':{'en': 'tdc'},
'455251':{'en': 'link mobile'},
'455252':{'en': 'lebara limited'},
'455253':{'en': 'cbb mobil'},
'455254':{'en': 'simservice'},
'455255':{'en': 'cbb mobil'},
'455256':{'en': 'simservice'},
'455257':{'en': 'simservice'},
'455258':{'en': 'tdc'},
'455259':{'en': '42 telecom ab'},
'45526':{'en': 'telenor'},
'45527':{'en': 'telenor'},
'45528':{'en': 'telenor'},
'45529':{'en': 'telenor'},
'45531':{'en': 'cbb mobil'},
'455319':{'en': 'telia'},
'45532':{'en': 'telia'},
'45533':{'en': 'telia'},
'455333':{'en': 'lebara limited'},
'45534':{'en': 'telia'},
'45535':{'en': '3'},
'45536':{'en': '3'},
'45537':{'en': '3'},
'45538':{'en': '3'},
'45539':{'en': 'cbb mobil'},
'455398':{'en': 'nextgen mobile ldt t/a cardboardfish'},
'45541':{'en': 'telenor'},
'45542':{'en': 'telenor'},
'45543':{'en': 'telenor'},
'45544':{'en': 'telenor'},
'45545':{'en': 'telenor'},
'45546':{'en': 'telenor'},
'45547':{'en': 'telenor'},
'45548':{'en': 'telenor'},
'45549':{'en': 'telenor'},
'45551':{'en': 'telenor'},
'45552':{'en': 'telenor'},
'45553':{'en': 'telenor'},
'45554':{'en': 'telenor'},
'45555':{'en': 'telenor'},
'45556':{'en': 'telenor'},
'45557':{'en': 'telenor'},
'45558':{'en': 'telenor'},
'45559':{'en': 'telenor'},
'45561':{'en': 'telenor'},
'45562':{'en': 'telenor'},
'45563':{'en': 'telenor'},
'45564':{'en': 'telenor'},
'45565':{'en': 'telenor'},
'45566':{'en': 'telenor'},
'45567':{'en': 'telenor'},
'45568':{'en': 'telenor'},
'45569':{'en': 'telenor'},
'45571':{'en': 'telenor'},
'45572':{'en': 'telenor'},
'45573':{'en': 'telenor'},
'45574':{'en': 'telenor'},
'45575':{'en': 'telenor'},
'45576':{'en': 'telenor'},
'45577':{'en': 'telenor'},
'45578':{'en': 'telenor'},
'45579':{'en': 'telenor'},
'45581':{'en': 'telenor'},
'45582':{'en': 'telenor'},
'45583':{'en': 'telenor'},
'45584':{'en': 'telenor'},
'45585':{'en': 'telenor'},
'45586':{'en': 'telenor'},
'45587':{'en': 'telenor'},
'45588':{'en': 'telenor'},
'45589':{'en': 'telenor'},
'45591':{'en': 'telenor'},
'45592':{'en': 'telenor'},
'45593':{'en': 'telenor'},
'45594':{'en': 'telenor'},
'45595':{'en': 'telenor'},
'45596':{'en': 'telenor'},
'45597':{'en': 'telenor'},
'45598':{'en': 'telenor'},
'45599':{'en': 'telenor'},
'45601':{'en': 'telia'},
'45602':{'en': 'telia'},
'45603':{'en': 'telia'},
'45604':{'en': 'telia'},
'45605':{'en': '3'},
'456050':{'en': 'telenor'},
'45606':{'en': 'cbb mobil'},
'45607':{'en': 'cbb mobil'},
'45608':{'en': 'cbb mobil'},
'456090':{'en': 'lebara limited'},
'456091':{'en': 'telenor'},
'456092':{'en': 'telenor'},
'456093':{'en': 'telenor'},
'456094':{'en': 'telenor'},
'456095':{'en': 'telenor'},
'456096':{'en': 'tripple track europe'},
'456097':{'en': 'tripple track europe'},
'456098':{'en': 'telavox'},
'456099':{'en': 'svr technologies (mach connectivity)'},
'4561':{'en': 'tdc'},
'45610':{'en': 'orange'},
'456146':{'en': 'telia'},
'45618':{'en': 'telenor'},
'45619':{'en': 'telenor'},
'45621':{'en': 'telenor'},
'45622':{'en': 'telenor'},
'45623':{'en': 'telenor'},
'45624':{'en': 'telenor'},
'45625':{'en': 'telenor'},
'45626':{'en': 'telenor'},
'45627':{'en': 'telenor'},
'45628':{'en': 'telenor'},
'45629':{'en': 'telenor'},
'45631':{'en': 'telenor'},
'45632':{'en': 'telenor'},
'45633':{'en': 'telenor'},
'45634':{'en': 'telenor'},
'45635':{'en': 'telenor'},
'45636':{'en': 'telenor'},
'45637':{'en': 'telenor'},
'45638':{'en': 'telenor'},
'45639':{'en': 'telenor'},
'4564212':{'en': 'tdc'},
'4564215':{'en': 'tdc'},
'4564222':{'en': 'tdc'},
'4564281':{'en': 'tdc'},
'4564292':{'en': 'tdc'},
'4564400':{'en': 'tdc'},
'4564401':{'en': 'tdc'},
'4564402':{'en': 'tdc'},
'4564403':{'en': 'tdc'},
'4564404':{'en': 'tdc'},
'4564406':{'en': 'tdc'},
'456441':{'en': 'tdc'},
'4564421':{'en': 'tdc'},
'4564422':{'en': 'tdc'},
'4564423':{'en': 'tdc'},
'4564431':{'en': 'tdc'},
'4564432':{'en': 'tdc'},
'4564433':{'en': 'tdc'},
'4564441':{'en': 'tdc'},
'4564442':{'en': 'tdc'},
'4564451':{'en': 'tdc'},
'4564457':{'en': 'tdc'},
'4564458':{'en': 'tdc'},
'4564459':{'en': 'tdc'},
'4564460':{'en': 'tdc'},
'4564461':{'en': 'tdc'},
'4564462':{'en': 'tdc'},
'4564471':{'en': 'tdc'},
'4564472':{'en': 'tdc'},
'4564473':{'en': 'tdc'},
'4564474':{'en': 'tdc'},
'4564481':{'en': 'tdc'},
'4564491':{'en': 'tdc'},
'4564492':{'en': 'tdc'},
'4564505':{'en': 'tdc'},
'456463':{'en': 'telenor'},
'456464':{'en': 'waoo'},
'456465':{'en': 'waoo'},
'456466':{'en': 'waoo'},
'456467':{'en': 'waoo'},
'456468':{'en': 'waoo'},
'456469':{'en': 'waoo'},
'456471':{'en': 'tdc'},
'4564721':{'en': 'tdc'},
'4564722':{'en': 'tdc'},
'4564723':{'en': 'tdc'},
'4564731':{'en': 'tdc'},
'4564732':{'en': 'tdc'},
'4564733':{'en': 'tdc'},
'4564741':{'en': 'tdc'},
'4564742':{'en': 'tdc'},
'4564746':{'en': 'tdc'},
'4564747':{'en': 'tdc'},
'4564751':{'en': 'tdc'},
'4564752':{'en': 'tdc'},
'4564761':{'en': 'tdc'},
'4564762':{'en': 'tdc'},
'4564763':{'en': 'tdc'},
'4564764':{'en': 'tdc'},
'4564771':{'en': 'tdc'},
'4564781':{'en': 'tdc'},
'4564787':{'en': 'tdc'},
'4564788':{'en': 'tdc'},
'4564789':{'en': 'tdc'},
'4564790':{'en': 'tdc'},
'4564791':{'en': 'tdc'},
'4564792':{'en': 'tdc'},
'4564801':{'en': 'tdc'},
'4564804':{'en': 'tdc'},
'4564805':{'en': 'tdc'},
'4564806':{'en': 'tdc'},
'4564811':{'en': 'tdc'},
'4564812':{'en': 'tdc'},
'4564813':{'en': 'tdc'},
'4564814':{'en': 'tdc'},
'4564820':{'en': 'tdc'},
'4564821':{'en': 'tdc'},
'4564822':{'en': 'tdc'},
'4564823':{'en': 'tdc'},
'4564824':{'en': 'tdc'},
'4564825':{'en': 'tdc'},
'4564826':{'en': 'tdc'},
'4564827':{'en': 'tdc'},
'4564828':{'en': 'tdc'},
'4564831':{'en': 'tdc'},
'4564841':{'en': 'tdc'},
'4564842':{'en': 'tdc'},
'4564851':{'en': 'tdc'},
'4564852':{'en': 'tdc'},
'4564861':{'en': 'tdc'},
'4564871':{'en': 'tdc'},
'4564872':{'en': 'tdc'},
'4564881':{'en': 'tdc'},
'4564882':{'en': 'tdc'},
'4564891':{'en': 'tdc'},
'4564892':{'en': 'tdc'},
'4564893':{'en': 'tdc'},
'4564897':{'en': 'tdc'},
'4564898':{'en': 'tdc'},
'4564899':{'en': 'tdc'},
'45651':{'en': 'telenor'},
'45652':{'en': 'telenor'},
'45653':{'en': 'telenor'},
'45654':{'en': 'telenor'},
'45655':{'en': 'telenor'},
'45656':{'en': 'telenor'},
'45657':{'en': 'telenor'},
'45658':{'en': 'telenor'},
'45659':{'en': 'telenor'},
'45661':{'en': 'telenor'},
'45662':{'en': 'telenor'},
'45663':{'en': 'telenor'},
'45664':{'en': 'telenor'},
'45665':{'en': 'telenor'},
'45666':{'en': 'telenor'},
'45667':{'en': 'telenor'},
'45668':{'en': 'telenor'},
'45669':{'en': 'telenor'},
'45691':{'en': 'telenor'},
'45692':{'en': 'telenor'},
'45693':{'en': 'telenor'},
'45694':{'en': 'telenor'},
'456957':{'en': 'telenor'},
'456958':{'en': 'telenor'},
'456959':{'en': 'telenor'},
'45696':{'en': 'telenor'},
'45697':{'en': 'telenor'},
'45698':{'en': 'telenor'},
'45699':{'en': 'telenor'},
'457010':{'en': 'tdc'},
'457011':{'en': 'tdc'},
'457012':{'en': 'tdc'},
'457013':{'en': 'tdc'},
'457014':{'en': 'tdc'},
'457015':{'en': 'tdc'},
'4570160':{'en': 'telenor'},
'4570161':{'en': 'telenor'},
'4570180':{'en': 'herobase'},
'4570181':{'en': 'telenor'},
'457019':{'en': 'telenor'},
'457030':{'en': 'telenor'},
'4570300':{'en': 'telia'},
'4570301':{'en': 'telia'},
'4570302':{'en': 'telia'},
'457031':{'en': 'telenor'},
'4570323':{'en': 'telenor'},
'457033':{'en': 'telenor'},
'4570345':{'en': 'telenor'},
'4570444':{'en': 'telenor'},
'4570500':{'en': 'telenor'},
'4570505':{'en': 'telenor'},
'4570507':{'en': 'telus aps'},
'4570555':{'en': 'telenor'},
'457060':{'en': 'telenor'},
'4570666':{'en': 'telenor'},
'457070':{'en': 'telenor'},
'457071':{'en': 'telenor'},
'4570770':{'en': 'telenor'},
'4570776':{'en': 'telenor'},
'4570777':{'en': 'telenor'},
'4570778':{'en': 'telenor'},
'457080':{'en': 'telenor'},
'4570810':{'en': 'telenor'},
'4570811':{'en': 'telenor'},
'4570812':{'en': 'telenor'},
'4570813':{'en': 'telenor'},
'4570814':{'en': 'telenor'},
'4570815':{'en': 'telenor'},
'4570816':{'en': 'telenor'},
'4570817':{'en': 'telenor'},
'4570818':{'en': 'telenor'},
'4570828':{'en': 'telenor'},
'4570838':{'en': 'telenor'},
'4570848':{'en': 'telenor'},
'4570858':{'en': 'telenor'},
'4570868':{'en': 'telenor'},
'457087':{'en': 'telenor'},
'457088':{'en': 'supertel danmark'},
'457089':{'en': 'telenor'},
'4570900':{'en': 'telenor'},
'4570907':{'en': 'telus aps'},
'4570909':{'en': 'telenor'},
'4570999':{'en': 'telenor'},
'45711':{'en': 'telenor'},
'45712':{'en': 'telenor'},
'45713':{'en': 'lycamobile denmark ltd'},
'45714':{'en': 'lycamobile denmark ltd'},
'45715':{'en': 'lycamobile denmark ltd'},
'45716':{'en': 'lycamobile denmark ltd'},
'457170':{'en': 'yousee'},
'457171':{'en': 'telenor'},
'457172':{'en': 'tdc'},
'457173':{'en': 'cbb mobil'},
'45717409':{'en': 'tdc'},
'45717429':{'en': 'tdc'},
'457175':{'en': 'telenor'},
'457176':{'en': 'telenor'},
'457177':{'en': 'tdc'},
'457178':{'en': 'telenor'},
'457179':{'en': 'telenor'},
'45718':{'en': 'lycamobile denmark ltd'},
'457190':{'en': '3'},
'457191':{'en': 'telecom x'},
'457192':{'en': 'fullrate'},
'457193':{'en': 'cbb mobil'},
'457194':{'en': 'telenor'},
'457195':{'en': 'telenor'},
'4571960':{'en': 'tdc'},
'45719649':{'en': 'tdc'},
'45719689':{'en': 'tdc'},
'457197':{'en': 'mundio mobile'},
'457198':{'en': 'mundio mobile'},
'457199':{'en': 'firmafon'},
'45721':{'en': 'telenor'},
'45722':{'en': 'telenor'},
'45723':{'en': 'telenor'},
'45724':{'en': 'telenor'},
'45725':{'en': 'telenor'},
'45726':{'en': 'telenor'},
'45727':{'en': 'telenor'},
'45728':{'en': 'telenor'},
'45729':{'en': 'telenor'},
'45731':{'en': 'telenor'},
'45732':{'en': 'telenor'},
'45733':{'en': 'telenor'},
'45734':{'en': 'telenor'},
'45735':{'en': 'telenor'},
'45736':{'en': 'telenor'},
'45737':{'en': 'telenor'},
'45738':{'en': 'telenor'},
'45739':{'en': 'telenor'},
'45741':{'en': 'telenor'},
'45742':{'en': 'telenor'},
'45743':{'en': 'telenor'},
'45744':{'en': 'telenor'},
'45745':{'en': 'telenor'},
'45746':{'en': 'telenor'},
'45747':{'en': 'telenor'},
'45748':{'en': 'telenor'},
'45749':{'en': 'telenor'},
'45751':{'en': 'telenor'},
'45752':{'en': 'telenor'},
'45753':{'en': 'telenor'},
'45754':{'en': 'telenor'},
'45755':{'en': 'telenor'},
'45756':{'en': 'telenor'},
'45757':{'en': 'telenor'},
'45758':{'en': 'telenor'},
'45759':{'en': 'telenor'},
'45761':{'en': 'telenor'},
'45762':{'en': 'telenor'},
'45763':{'en': 'telenor'},
'45764':{'en': 'telenor'},
'45765':{'en': 'telenor'},
'45766':{'en': 'telenor'},
'45767':{'en': 'telenor'},
'45768':{'en': 'telenor'},
'45769':{'en': 'telenor'},
'45771':{'en': 'telenor'},
'45772':{'en': 'telenor'},
'45773':{'en': 'telenor'},
'45774':{'en': 'telenor'},
'45775':{'en': 'telenor'},
'45776':{'en': 'telenor'},
'45777':{'en': 'telenor'},
'45778':{'en': 'telenor'},
'45779':{'en': 'telenor'},
'45781':{'en': 'telenor'},
'45782':{'en': 'telenor'},
'45783':{'en': 'telenor'},
'45784':{'en': 'telenor'},
'45785':{'en': 'telenor'},
'45786':{'en': 'telenor'},
'45787':{'en': 'telenor'},
'457879':{'en': 'supertel danmark'},
'45788':{'en': 'telenor'},
'45789':{'en': 'telenor'},
'45791':{'en': 'telenor'},
'45792':{'en': 'telenor'},
'45793':{'en': 'telenor'},
'45794':{'en': 'telenor'},
'45795':{'en': 'telenor'},
'45796':{'en': 'telenor'},
'45797':{'en': 'telenor'},
'45798':{'en': 'telenor'},
'45799':{'en': 'telenor'},
'45811':{'en': 'telenor'},
'45812':{'en': 'telenor'},
'458130':{'en': 'cbb mobil'},
'458131':{'en': 'cbb mobil'},
'458132':{'en': 'cbb mobil'},
'458133':{'en': 'cbb mobil'},
'458134':{'en': 'cbb mobil'},
'458135':{'en': 'cbb mobil'},
'458136':{'en': 'cbb mobil'},
'4581370':{'en': 'telenor'},
'4581371':{'en': 'clx networks ab'},
'4581372':{'en': 'care solutions aka phone-it'},
'4581373':{'en': 'tdc'},
'4581374':{'en': 'mitto ag'},
'4581375':{'en': 'monty uk global limited'},
'4581376':{'en': 'icentrex lso(tdc)'},
'4581379':{'en': 'telenor'},
'458138':{'en': 'mundio mobile'},
'458139':{'en': 'mundio mobile'},
'458140':{'en': 'ipnordic'},
'458141':{'en': '3'},
'458144':{'en': 'fullrate'},
'458145':{'en': 'telavox'},
'458146':{'en': 'mundio mobile'},
'458147':{'en': 'mundio mobile'},
'458148':{'en': 'mundio mobile'},
'458149':{'en': 'mundio mobile'},
'45815':{'en': 'cbb mobil'},
'45816':{'en': 'cbb mobil'},
'458161':{'en': 'tdc'},
'458170':{'en': 'cbb mobil'},
'458171':{'en': 'tdc'},
'458172':{'en': 'fullrate'},
'458173':{'en': 'tdc'},
'458174':{'en': 'tdc'},
'458175':{'en': 'tdc'},
'458176':{'en': 'cbb mobil'},
'458177':{'en': 'ipvision'},
'458178':{'en': 'cbb mobil'},
'458179':{'en': 'cbb mobil'},
'45818':{'en': 'cbb mobil'},
'458180':{'en': 'ipvision'},
'458181':{'en': 'maxtel.dk'},
'458182':{'en': 'polperro'},
'458188':{'en': 'ipvision'},
'458190':{'en': 'lebara limited'},
'458191':{'en': 'lebara limited'},
'458192':{'en': 'lebara limited'},
'458193':{'en': 'lebara limited'},
'458194':{'en': 'lebara limited'},
'458195':{'en': 'cbb mobil'},
'458196':{'en': 'cbb mobil'},
'458197':{'en': 'cbb mobil'},
'458198':{'en': 'cbb mobil'},
'458199':{'en': 'telenor'},
'45821':{'en': 'telenor'},
'45822':{'en': 'telenor'},
'45823':{'en': 'telenor'},
'45824':{'en': 'telenor'},
'45825':{'en': 'telenor'},
'45826':{'en': 'telenor'},
'45827':{'en': 'telenor'},
'45828':{'en': 'telenor'},
'45829':{'en': 'telenor'},
'45861':{'en': 'telenor'},
'45862':{'en': 'telenor'},
'45863':{'en': 'telenor'},
'45864':{'en': 'telenor'},
'45865':{'en': 'telenor'},
'45866':{'en': 'telenor'},
'45867':{'en': 'telenor'},
'45868':{'en': 'telenor'},
'45869':{'en': 'telenor'},
'45871':{'en': 'telenor'},
'45872':{'en': 'telenor'},
'45873':{'en': 'telenor'},
'45874':{'en': 'telenor'},
'45875':{'en': 'telenor'},
'45876':{'en': 'telenor'},
'45877':{'en': 'telenor'},
'45878':{'en': 'telenor'},
'45879':{'en': 'telenor'},
'45881':{'en': 'telenor'},
'45882':{'en': 'telenor'},
'45883':{'en': 'telenor'},
'45884':{'en': 'telenor'},
'45885':{'en': 'telenor'},
'45886':{'en': 'telenor'},
'45887':{'en': 'telenor'},
'45888':{'en': 'telenor'},
'45889':{'en': 'telenor'},
'45891':{'en': 'telenor'},
'45892':{'en': 'telenor'},
'45893':{'en': 'telenor'},
'45894':{'en': 'telenor'},
'45895':{'en': 'telenor'},
'45896':{'en': 'telenor'},
'45897':{'en': 'telenor'},
'45898':{'en': 'telenor'},
'45899':{'en': 'telenor'},
'459110':{'en': 'lebara limited'},
'459111':{'en': 'lebara limited'},
'459112':{'en': 'simservice'},
'459113':{'en': 'simservice'},
'459114':{'en': 'simservice'},
'459115':{'en': 'tdc'},
'459116':{'en': 'tdc'},
'459117':{'en': 'tdc'},
'459118':{'en': 'tdc'},
'459119':{'en': 'lebara limited'},
'459120':{'en': 'tismi bv'},
'459121':{'en': 'simservice'},
'459122':{'en': 'tdc'},
'459123':{'en': 'tdc'},
'459124':{'en': 'tdc'},
'459125':{'en': 'tdc'},
'459126':{'en': 'mundio mobile'},
'459127':{'en': 'mundio mobile'},
'459128':{'en': 'mundio mobile'},
'459129':{'en': 'mundio mobile'},
'4591300':{'en': 'maxtel.dk'},
'4591303':{'en': 'maxtel.dk'},
'459131':{'en': 'telenor'},
'459132':{'en': 'telenor'},
'459133':{'en': 'telenor'},
'459134':{'en': 'telenor'},
'459135':{'en': 'telenor'},
'459136':{'en': 'telenor'},
'459137':{'en': 'telenor'},
'459138':{'en': 'telenor'},
'459139':{'en': 'telenor'},
'45914':{'en': 'lycamobile denmark ltd'},
'459150':{'en': 'telenor'},
'459151':{'en': 'telenor'},
'459152':{'en': 'tdc'},
'459153':{'en': 'tdc'},
'459154':{'en': 'tdc'},
'459155':{'en': 'tdc'},
'459156':{'en': 'tdc'},
'459157':{'en': 'mundio mobile'},
'459158':{'en': 'nextgen mobile ltd t/a cardboardfish'},
'459159':{'en': 'simservice'},
'45916':{'en': 'lycamobile denmark ltd'},
'45917':{'en': 'lycamobile denmark ltd'},
'45918':{'en': 'lebara limited'},
'459189':{'en': 'tdc'},
'45919':{'en': 'lebara limited'},
'459190':{'en': 'intelecom'},
'459191':{'en': 'maxtel.dk'},
'45921':{'en': 'tdc'},
'459217':{'en': 'interactive digital media gmbh'},
'459218':{'en': 'telenor'},
'459219':{'en': 'telenor'},
'459220':{'en': 'telenor'},
'459221':{'en': 'tdc'},
'459222':{'en': 'tdc'},
'459223':{'en': '42 telecom ab'},
'459224':{'en': 'simservice'},
'459225':{'en': 'mundio mobile'},
'459226':{'en': 'mundio mobile'},
'459227':{'en': 'mundio mobile'},
'459228':{'en': 'mundio mobile'},
'459229':{'en': 'beepsend ab'},
'45923':{'en': 'telenor'},
'459240':{'en': 'gigsky aps'},
'459241':{'en': 'gigsky aps'},
'459242':{'en': 'gigsky aps'},
'459243':{'en': 'tdc'},
'459244':{'en': 'ipnordic'},
'459245':{'en': 'compatel limited'},
'459246':{'en': 'telenor'},
'459247':{'en': 'telenor'},
'459248':{'en': 'telenor'},
'459249':{'en': 'telenor'},
'45925':{'en': 'telenor'},
'45926':{'en': 'telenor'},
'45927':{'en': 'telenor'},
'459270':{'en': 'ice danmark'},
'459272':{'en': 'thyfon'},
'45928':{'en': 'telenor'},
'459280':{'en': 'voxbone'},
'459281':{'en': 'gigsky aps'},
'459282':{'en': 'flexfone'},
'459283':{'en': 'tdc'},
'45929':{'en': 'telenor'},
'459290':{'en': 'fullrate'},
'459299':{'en': 'ipvision'},
'459310':{'en': 'fullrate'},
'459311':{'en': 'benemen lso (tdc)'},
'459312':{'en': 'tdc'},
'459313':{'en': 'tdc'},
'459314':{'en': 'simservice'},
'459315':{'en': 'simservice'},
'459316':{'en': 'simservice'},
'459317':{'en': 'simservice'},
'459318':{'en': 'simservice'},
'459319':{'en': 'tdc'},
'459320':{'en': 'fullrate'},
'459321':{'en': 'simservice'},
'459322':{'en': 'simservice'},
'459323':{'en': 'simservice'},
'459324':{'en': 'simservice'},
'459325':{'en': 'telenor'},
'459326':{'en': 'telenor'},
'459327':{'en': 'telenor'},
'459328':{'en': 'telenor'},
'459329':{'en': 'telenor'},
'459330':{'en': 'fullrate'},
'459331':{'en': 'tdc'},
'459332':{'en': 'telenor'},
'459333':{'en': 'onoffapp'},
'459334':{'en': 'simservice'},
'459335':{'en': 'simservice'},
'459336':{'en': 'simservice'},
'459337':{'en': 'simservice'},
'459338':{'en': 'simservice'},
'459339':{'en': 'uni-tel'},
'459340':{'en': 'fullrate'},
'459341':{'en': 'telenor'},
'459342':{'en': 'telenor'},
'459343':{'en': 'telenor'},
'459344':{'en': 'telenor'},
'459345':{'en': 'telenor'},
'459346':{'en': 'simservice'},
'459347':{'en': 'simservice'},
'459348':{'en': 'simservice'},
'459349':{'en': 'simservice'},
'45935':{'en': 'telenor'},
'45936':{'en': 'simservice'},
'459360':{'en': '3'},
'459361':{'en': 'telenor'},
'459362':{'en': 'telenor'},
'459363':{'en': 'tdc'},
'459370':{'en': 'telenor'},
'459371':{'en': 'simservice'},
'459372':{'en': 'simservice'},
'459373':{'en': 'simservice'},
'459375':{'en': 'telenor'},
'459376':{'en': 'tdc'},
'459377':{'en': 'tdc'},
'459378':{'en': 'telenor'},
'459379':{'en': 'tdc'},
'45938':{'en': '3'},
'459381':{'en': 'tdc'},
'459382':{'en': 'tdc'},
'45939':{'en': '3'},
'459440':{'en': 'tdc'},
'459441':{'en': 'tdc'},
'459442':{'en': 'tdc'},
'459481':{'en': 'tdc'},
'4596':{'en': 'telenor'},
'45971':{'en': 'telenor'},
'45972':{'en': 'telenor'},
'45973':{'en': 'telenor'},
'45974':{'en': 'telenor'},
'45975':{'en': 'telenor'},
'45976':{'en': 'telenor'},
'45978':{'en': 'telenor'},
'45979':{'en': 'telenor'},
'45981':{'en': 'telenor'},
'45982':{'en': 'telenor'},
'45983':{'en': 'telenor'},
'45984':{'en': 'telenor'},
'45985':{'en': 'telenor'},
'45986':{'en': 'telenor'},
'45987':{'en': 'telenor'},
'45988':{'en': 'telenor'},
'45989':{'en': 'telenor'},
'45991':{'en': 'telenor'},
'45992':{'en': 'telenor'},
'45993':{'en': 'telenor'},
'45994':{'en': 'telenor'},
'45995':{'en': 'telenor'},
'45996':{'en': 'telenor'},
'45997':{'en': 'telenor'},
'45998':{'en': 'telenor'},
'45999':{'en': 'telenor'},
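# End of country calling code 45 (Denmark) prefixes.
# Note: longer, more specific prefixes are presumably intended to take precedence
# over shorter ones when resolving a number (e.g. '458180' -> ipvision rather than
# '45818' -> cbb mobil).
# Country calling code 46 (Sweden) prefixes follow.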
'46700':{'en': 'Tele2 Sverige'},
'467010':{'en': 'SPINBOX AB'},
'467011':{'en': 'Telenor Sverige'},
'467012':{'en': 'SPINBOX AB'},
'46701332':{'en': 'EU Tel AB'},
'46701334':{'en': 'EU Tel AB'},
'46701335':{'en': 'EU Tel AB'},
'46701336':{'en': 'EU Tel AB'},
'46701338':{'en': 'EU Tel AB'},
'46701339':{'en': 'EU Tel AB'},
'46701341':{'en': 'EU Tel AB'},
'46701342':{'en': 'EU Tel AB'},
'46701346':{'en': 'EU Tel AB'},
'46701347':{'en': 'EU Tel AB'},
'46701348':{'en': 'EU Tel AB'},
'46701349':{'en': 'EU Tel AB'},
'46701353':{'en': 'EU Tel AB'},
'46701356':{'en': 'EU Tel AB'},
'46701358':{'en': 'EU Tel AB'},
'46701359':{'en': 'EU Tel AB'},
'46701362':{'en': 'EU Tel AB'},
'46701364':{'en': '42 Telecom AB'},
'46701365':{'en': '42 Telecom AB'},
'46701366':{'en': '42 Telecom AB'},
'46701367':{'en': '42 Telecom AB'},
'46701368':{'en': '42 Telecom AB'},
'46701369':{'en': '42 Telecom AB'},
'4670137':{'en': '42 Telecom AB'},
'46701381':{'en': '42 Telecom AB'},
'46701383':{'en': '42 Telecom AB'},
'46701384':{'en': '42 Telecom AB'},
'46701385':{'en': '42 Telecom AB'},
'46701386':{'en': '42 Telecom AB'},
'46701388':{'en': '42 Telecom AB'},
'46701389':{'en': '42 Telecom AB'},
'46701390':{'en': '42 Telecom AB'},
'46701391':{'en': '42 Telecom AB'},
'46701392':{'en': '42 Telecom AB'},
'46701393':{'en': '42 Telecom AB'},
'46701394':{'en': '42 Telecom AB'},
'46701396':{'en': '42 Telecom AB'},
'46701397':{'en': '42 Telecom AB'},
'46701398':{'en': '42 Telecom AB'},
'46701399':{'en': '42 Telecom AB'},
'467014':{'en': 'Telenor Sverige'},
'467015':{'en': 'Tele2 Sverige'},
'467016':{'en': 'Tele2 Sverige'},
'46701717':{'en': '42 Telecom AB'},
'46701741':{'en': '42 Telecom AB'},
'46701779':{'en': 'EU Tel AB'},
'46701780':{'en': '42 Telecom AB'},
'46701781':{'en': '42 Telecom AB'},
'46701782':{'en': '42 Telecom AB'},
'46701783':{'en': '42 Telecom AB'},
'46701784':{'en': '42 Telecom AB'},
'46701785':{'en': '42 Telecom AB'},
'46701786':{'en': '42 Telecom AB'},
'46701788':{'en': 'Ventelo Sverige'},
'46701790':{'en': 'Svea Billing System'},
'46701791':{'en': 'Svea Billing System'},
'46701792':{'en': 'Svea Billing System'},
'46701793':{'en': 'Svea Billing System'},
'46701794':{'en': 'Svea Billing System'},
'46701795':{'en': 'Svea Billing System'},
'46701796':{'en': 'Svea Billing System'},
'46701797':{'en': 'EU Tel AB'},
'46701798':{'en': 'Gotalandsnatet'},
'467018':{'en': 'SPINBOX AB'},
'4670189':{'en': 'Alltele Sverige'},
'46701897':{'en': 'Gotalandsnatet'},
'4670190':{'en': 'Ventelo Sverige'},
'4670191':{'en': 'Ventelo Sverige'},
'46701920':{'en': 'Viatel Sweden'},
'46701921':{'en': 'Beepsend'},
'46701924':{'en': 'Compatel Limited'},
'46701925':{'en': 'Mobile Arts AB'},
'46701926':{'en': 'Beepsend'},
'46701928':{'en': 'HORISEN AG'},
'4670193':{'en': 'Com Hem'},
'4670194':{'en': 'Gotalandsnatet'},
'4670195':{'en': 'Gotalandsnatet'},
'46701965':{'en': '42 Telecom AB'},
'46701966':{'en': '42 Telecom AB'},
'46701967':{'en': '42 Telecom AB'},
'46701968':{'en': '42 Telecom AB'},
'4670197':{'en': 'Weblink IP Phone'},
'46701977':{'en': '42 Telecom AB'},
'46701978':{'en': '42 Telecom AB'},
'46701979':{'en': '42 Telecom AB'},
'4670198':{'en': 'IP-Only Telecommunication'},
'46701990':{'en': 'Telenor Sverige'},
'46701991':{'en': 'Telenor Sverige'},
'46701992':{'en': 'Telenor Sverige'},
'46701993':{'en': 'Telenor Sverige'},
'46701994':{'en': 'Telenor Sverige'},
'46701995':{'en': 'Telenor Sverige'},
'46701997':{'en': '42 Telecom AB'},
'46701998':{'en': 'MERCURY INTERNATIONA'},
'46701999':{'en': '42 Telecom AB'},
'46702':{'en': 'TeliaSonera'},
'46703':{'en': 'TeliaSonera'},
'46704':{'en': 'Tele2 Sverige'},
'46705':{'en': 'TeliaSonera'},
'46706':{'en': 'TeliaSonera'},
'46707':{'en': 'Tele2 Sverige'},
'46708':{'en': 'Telenor Sverige'},
'46709':{'en': 'Telenor Sverige'},
'467200':{'en': 'Tele2 Sverige'},
'467201':{'en': 'Tele2 Sverige'},
'467202':{'en': 'Tele2 Sverige'},
'467203':{'en': 'Tele2 Sverige'},
'467204':{'en': 'Tele2 Sverige'},
'46720501':{'en': 'Generic Mobil Systems'},
'46720502':{'en': 'Telavox AB'},
'46720503':{'en': 'Telavox AB'},
'46720504':{'en': 'Telavox AB'},
'46720505':{'en': 'Telavox AB'},
'46720506':{'en': 'Telavox AB'},
'46720507':{'en': 'Telavox AB'},
'46720509':{'en': 'Telavox AB'},
'4672051':{'en': 'WIFOG AB'},
'4672052':{'en': 'WIFOG AB'},
'4672053':{'en': 'WIFOG AB'},
'4672054':{'en': 'WIFOG AB'},
'4672055':{'en': 'Bahnhof AB'},
'4672056':{'en': 'Bahnhof AB'},
'4672057':{'en': 'WIFOG AB'},
'46720580':{'en': 'MERCURY INTERNATIONA'},
'46720581':{'en': 'Beepsend'},
'46720582':{'en': 'iCentrex Sweden AB'},
'46720583':{'en': 'iCentrex Sweden AB'},
'46720584':{'en': 'iCentrex Sweden AB'},
'46720585':{'en': 'iCentrex Sweden AB'},
'46720586':{'en': 'iCentrex Sweden AB'},
'4672059':{'en': 'Telenor Sverige'},
'467206':{'en': 'Com Hem'},
'467207':{'en': 'SOLUNO BC AB'},
'46720801':{'en': 'Telavox AB'},
'46720802':{'en': 'Telavox AB'},
'46720803':{'en': 'Telavox AB'},
'46720807':{'en': 'Telavox AB'},
'46720808':{'en': 'Telavox AB'},
'4672081':{'en': 'BM Sverige AB'},
'4672082':{'en': 'Fibio Nordic AB'},
'4672083':{'en': 'Tele2 Sverige'},
'4672084':{'en': 'Tele2 Sverige'},
'4672085':{'en': 'Tele2 Sverige'},
'4672088':{'en': 'Telenor Sverige'},
'46720902':{'en': 'Telavox AB'},
'46720908':{'en': 'Telavox AB'},
'4672092':{'en': 'Telavox AB'},
'46720999':{'en': 'MOBIWEB LTD'},
'467210':{'en': 'SVENSK KONSUMENTMOBI'},
'467211':{'en': 'SVENSK KONSUMENTMOBI'},
'467212':{'en': 'TeliaSonera'},
'467213':{'en': 'TeliaSonera'},
'4672140':{'en': 'Bredband 2'},
'4672141':{'en': 'Tele2 Sverige'},
'4672142':{'en': 'Tele2 Sverige'},
'4672143':{'en': 'Tele2 Sverige'},
'4672144':{'en': 'Tele2 Sverige'},
'4672145':{'en': 'Tele2 Sverige'},
'4672146':{'en': 'Tele2 Sverige'},
'4672147':{'en': 'Tele2 Sverige'},
'4672148':{'en': 'Tele2 Sverige'},
'46721490':{'en': 'Tele2 Sverige'},
'46721491':{'en': 'Tele2 Sverige'},
'46721492':{'en': 'Tele2 Sverige'},
'46721493':{'en': 'Tele2 Sverige'},
'46721494':{'en': 'Tele2 Sverige'},
'46721495':{'en': 'Beepsend'},
'46721497':{'en': 'MONTY UK GLOBAL LIM'},
'46721498':{'en': 'Beepsend'},
'467215':{'en': 'Telenor Sverige'},
'467216':{'en': 'Telenor Sverige'},
'467217':{'en': 'Telenor Sverige'},
'467218':{'en': 'Telenor Sverige'},
'467219':{'en': 'Telenor Sverige'},
'46722':{'en': 'TeliaSonera'},
'467230':{'en': 'HI3G Access'},
'467231':{'en': 'HI3G Access'},
'467232':{'en': 'HI3G Access'},
'467233':{'en': 'HI3G Access'},
'46723401':{'en': 'LOXYTEL AB'},
'46723403':{'en': 'Beepsend'},
'46723404':{'en': 'LOXYTEL AB'},
'46723405':{'en': 'LOXYTEL AB'},
'46723406':{'en': 'LOXYTEL AB'},
'46723407':{'en': 'LOXYTEL AB'},
'46723408':{'en': 'ONOFF TELECOM SAS'},
'46723409':{'en': 'ONOFF TELECOM SAS'},
'4672341':{'en': 'TELIGOO AB (Fello AB)'},
'4672342':{'en': 'Telenor Sverige'},
'4672343':{'en': 'MESSAGEBIRD B.V.'},
'46723440':{'en': 'Beepsend'},
'46723449':{'en': 'Beepsend'},
'4672345':{'en': '42 Telecom AB'},
'46723460':{'en': 'Beepsend'},
'4672347':{'en': 'Benemen Oy'},
'4672348':{'en': 'Benemen Oy'},
'46723490':{'en': 'Beepsend'},
'46723499':{'en': 'Beepsend'},
'467235':{'en': 'Telenor Sverige'},
'467236':{'en': 'Telenor Sverige'},
'467237':{'en': 'Telenor Sverige'},
'467238':{'en': 'Telenor Sverige'},
'467239':{'en': 'Telenor Sverige'},
'46724000':{'en': 'Telenor Sverige'},
'46724001':{'en': 'Beepsend'},
'46724002':{'en': 'Voice Integrate'},
'46724003':{'en': 'Voice Integrate'},
'46724004':{'en': 'Beepsend'},
'46724008':{'en': 'Telavox AB'},
'4672401':{'en': 'Telavox AB'},
'4672402':{'en': 'Telavox AB'},
'467242':{'en': 'WIFOG AB'},
'467243':{'en': 'WIFOG AB'},
'467244':{'en': 'Telenor Sverige'},
'467245':{'en': 'TeliaSonera'},
'467246':{'en': 'TeliaSonera'},
'467247':{'en': 'TeliaSonera'},
'467248':{'en': 'TeliaSonera'},
'467249':{'en': 'TeliaSonera'},
'46725':{'en': 'TeliaSonera'},
'46726000':{'en': 'Beepsend'},
'46726001':{'en': 'FINK TELECOM SERVIC'},
'46726003':{'en': 'MOBIWEB LTD'},
'46726004':{'en': 'Tele2 Sverige'},
'46726005':{'en': 'Tele2 Sverige'},
'46726006':{'en': 'Telavox AB'},
'46726008':{'en': 'Global Telefoni Sve'},
'4672601':{'en': 'Telavox AB'},
'4672606':{'en': 'Tele2 Sverige'},
'467261':{'en': 'GLOBETOUCH AB'},
'467262':{'en': 'GLOBETOUCH AB'},
'467263':{'en': 'GLOBETOUCH AB'},
'46726421':{'en': 'WARSIN HOLDING AB'},
'46726422':{'en': 'Beepsend'},
'46726423':{'en': 'Global Telefoni Sve'},
'46726424':{'en': 'Global Telefoni Sve'},
'46726425':{'en': 'Global Telefoni Sve'},
'46726426':{'en': 'Global Telefoni Sve'},
'46726427':{'en': 'Global Telefoni Sve'},
'46726428':{'en': 'Global Telefoni Sve'},
'46726429':{'en': 'Global Telefoni Sve'},
'4672644':{'en': 'Telenor Sverige'},
'467265':{'en': 'TeliaSonera'},
'4672660':{'en': 'Telenor Sverige'},
'4672666':{'en': 'Telenor Sverige'},
'4672669':{'en': 'Nortech'},
'467267':{'en': 'TeliaSonera'},
'467268':{'en': 'TeliaSonera'},
'4672698':{'en': 'SWEDFONENET AB'},
'46726990':{'en': 'Gotalandsnatet'},
'46726991':{'en': 'Fast Communication'},
'46726992':{'en': 'Fast Communication'},
'46726993':{'en': 'SWEDFONENET AB'},
'46726994':{'en': 'SWEDFONENET AB'},
'46726995':{'en': 'SWEDFONENET AB'},
'46726996':{'en': 'Nortech'},
'46726997':{'en': 'ONOFF TELECOM SAS'},
'46726998':{'en': 'ONOFF TELECOM SAS'},
'467270':{'en': 'TeliaSonera'},
'467271':{'en': 'TeliaSonera'},
'467272':{'en': 'TeliaSonera'},
'467273':{'en': 'TeliaSonera'},
'467274':{'en': 'TeliaSonera'},
'46727501':{'en': 'ONOFF TELECOM SAS'},
'46727502':{'en': 'ONOFF TELECOM SAS'},
'46727503':{'en': 'MINITEL AB'},
'46727504':{'en': 'FINK TELECOM SERVIC'},
'46727506':{'en': 'FINK TELECOM SERVIC'},
'46727507':{'en': 'FINK TELECOM SERVIC'},
'46727510':{'en': 'ONOFF TELECOM SAS'},
'46727511':{'en': 'ONOFF TELECOM SAS'},
'46727515':{'en': 'FINK TELECOM SERVIC'},
'46727516':{'en': 'FINK TELECOM SERVIC'},
'4672753':{'en': 'NETMORE GROUP AB'},
'4672754':{'en': 'Telenor Sverige'},
'4672755':{'en': 'FINK TELECOM SERVIC'},
'4672756':{'en': 'FINK TELECOM SERVIC'},
'467276':{'en': 'Lycamobile Sweden'},
'467277':{'en': 'Lycamobile Sweden'},
'467278':{'en': 'Lycamobile Sweden'},
'46728100':{'en': 'Voice Integrate'},
'46728101':{'en': 'Beepsend'},
'46728198':{'en': 'Telavox AB'},
'467282':{'en': 'Telecom3 Networks'},
'467283':{'en': 'Tele2 Sverige'},
'467284':{'en': 'Tele2 Sverige'},
'467285':{'en': 'Tele2 Sverige'},
'467286':{'en': 'Tele2 Sverige'},
'467287':{'en': 'Tele2 Sverige'},
'467288':{'en': 'Telenor Sverige'},
'467289':{'en': 'Qall Telecom AB'},
'467290':{'en': 'Tele2 Sverige'},
'467291':{'en': 'Tele2 Sverige'},
'467292':{'en': 'Tele2 Sverige'},
'467293':{'en': 'Tele2 Sverige'},
'467294':{'en': 'Tele2 Sverige'},
'467296':{'en': 'Telenor Sverige'},
'467297':{'en': 'Telenor Sverige'},
'467298':{'en': 'Telenor Sverige'},
'467299':{'en': 'Telenor Sverige'},
'46730':{'en': 'TeliaSonera'},
'467301':{'en': 'Maingate (Sierra Wireless)'},
'467310':{'en': 'Telenor Sverige'},
'467311':{'en': 'Maingate (Sierra Wireless)'},
'4673120':{'en': 'Telavox AB'},
'46731214':{'en': 'Voice Integrate'},
'46731215':{'en': 'COOLTEL APS'},
'46731216':{'en': 'HORISEN AG'},
'46731219':{'en': 'CLX Networks AB'},
'4673122':{'en': 'EU Tel AB'},
'4673123':{'en': '42 Telecom AB'},
'46731245':{'en': 'EU Tel AB'},
'46731247':{'en': 'Beepsend'},
'46731248':{'en': 'TELNESS AB'},
'4673125':{'en': 'Telenor Sverige'},
'4673126':{'en': 'Telenor Connexion'},
'4673127':{'en': 'SWEDFONENET AB'},
'4673128':{'en': 'SST Net Sverige AB'},
'4673129':{'en': 'SPIRIUS AB'},
'467313':{'en': 'iMEZ'},
'467314':{'en': 'Telenor Sverige'},
'467315':{'en': 'Telenor Sverige'},
'467316':{'en': 'Alltele Sverige'},
'46731706':{'en': 'Soatso AB'},
'4673171':{'en': 'Ventelo Sverige'},
'46731721':{'en': 'REWICOM SCANDINAVIA'},
'46731723':{'en': 'REWICOM SCANDINAVIA'},
'46731724':{'en': 'REWICOM SCANDINAVIA'},
'46731725':{'en': 'REWICOM SCANDINAVIA'},
'46731726':{'en': 'REWICOM SCANDINAVIA'},
'46731727':{'en': 'Beepsend'},
'46731728':{'en': 'Beepsend'},
'46731729':{'en': 'IPIFY LIMITED'},
'4673173':{'en': 'Svea Billing System'},
'4673174':{'en': 'Svea Billing System'},
'4673175':{'en': 'Svea Billing System'},
'4673176':{'en': 'ID Mobile'},
'4673177':{'en': 'SST Net Sverige AB'},
'4673178':{'en': 'SST Net Sverige AB'},
'4673179':{'en': 'SST Net Sverige AB'},
'467318':{'en': 'ACN Communications Sweden'},
'467319':{'en': 'TeliaSonera'},
'467320':{'en': 'Telenor Sverige'},
'467321':{'en': 'Tele2 Sverige'},
'467322':{'en': 'Tele2 Sverige'},
'467323':{'en': 'Telenor Sverige'},
'467324':{'en': 'Telenor Sverige'},
'467325':{'en': 'Telenor Sverige'},
'467326':{'en': 'Telenor Sverige'},
'467327':{'en': 'Ventelo Sverige'},
'467328':{'en': 'Telenor Sverige'},
'46733':{'en': 'Telenor Sverige'},
'467340':{'en': 'Telenor Sverige'},
'467341':{'en': 'Telenor Sverige'},
'467342':{'en': 'Telenor Sverige'},
'467343':{'en': 'Telenor Sverige'},
'467344':{'en': 'Telenor Sverige'},
'4673450':{'en': 'Weelia Enterprise A'},
'4673451':{'en': 'CELLIP AB'},
'46734520':{'en': 'Soatso AB'},
'46734521':{'en': 'Soatso AB'},
'46734522':{'en': 'Soatso AB'},
'46734523':{'en': 'Soatso AB'},
'46734524':{'en': 'Soatso AB'},
'46734525':{'en': 'Soatso AB'},
'46734527':{'en': 'Soatso AB'},
'46734528':{'en': 'Soatso AB'},
'46734529':{'en': 'Soatso AB'},
'4673454':{'en': 'Tele2 Sverige'},
'4673455':{'en': 'Viatel Sweden'},
'4673456':{'en': 'Svea Billing System'},
'4673457':{'en': 'Telenor Sverige'},
'4673458':{'en': 'Telenor Sverige'},
'4673459':{'en': '42 Telecom AB'},
'467346':{'en': 'Telenor Sverige'},
'4673460':{'en': 'Ventelo Sverige'},
'46734600':{'en': 'MERCURY INTERNATIONA'},
'46734601':{'en': 'MERCURY INTERNATIONA'},
'4673461':{'en': 'Ventelo Sverige'},
'46734700':{'en': '42 Telecom AB'},
'46734702':{'en': 'MOBIWEB LTD'},
'46734703':{'en': 'MOBIWEB LTD'},
'46734704':{'en': 'MOBIWEB LTD'},
'46734705':{'en': 'MOBIWEB LTD'},
'46734706':{'en': 'MOBIWEB LTD'},
'46734707':{'en': 'MOBIWEB LTD'},
'46734708':{'en': 'MOBIWEB LTD'},
'46734709':{'en': 'MOBIWEB LTD'},
'4673471':{'en': 'Telenor Sverige'},
'4673472':{'en': 'Telenor Sverige'},
'46734731':{'en': 'MERCURY INTERNATIONA'},
'46734732':{'en': 'MERCURY INTERNATIONA'},
'46734733':{'en': 'MERCURY INTERNATIONA'},
'46734734':{'en': 'MERCURY INTERNATIONA'},
'46734735':{'en': 'MERCURY INTERNATIONA'},
'46734736':{'en': 'MERCURY INTERNATIONA'},
'46734737':{'en': 'MERCURY INTERNATIONA'},
'46734738':{'en': 'MERCURY INTERNATIONA'},
'46734739':{'en': 'MERCURY INTERNATIONA'},
'46734740':{'en': 'Gotalandsnatet'},
'46734741':{'en': 'Soatso AB'},
'46734743':{'en': 'Soatso AB'},
'46734744':{'en': 'Soatso AB'},
'46734745':{'en': 'Beepsend'},
'46734747':{'en': 'Telavox AB'},
'4673475':{'en': 'Lycamobile Sweden'},
'4673476':{'en': 'Lycamobile Sweden'},
'4673477':{'en': 'Lycamobile Sweden'},
'4673478':{'en': 'Lycamobile Sweden'},
'4673479':{'en': 'Lycamobile Sweden'},
'467348':{'en': 'Lycamobile Sweden'},
'467349':{'en': 'Lycamobile Sweden'},
'467350':{'en': 'HI3G Access'},
'467351':{'en': 'HI3G Access'},
'467352':{'en': 'HI3G Access'},
'467353':{'en': 'HI3G Access'},
'467354':{'en': 'HI3G Access'},
'467355':{'en': 'Tele2 Sverige'},
'467356':{'en': 'Tele2 Sverige'},
'467357':{'en': 'Tele2 Sverige'},
'467358':{'en': 'Tele2 Sverige'},
'467359':{'en': 'Tele2 Sverige'},
'46736':{'en': 'Tele2 Sverige'},
'46737':{'en': 'Tele2 Sverige'},
'467380':{'en': 'TeliaSonera'},
'467381':{'en': 'TeliaSonera'},
'467382':{'en': 'TeliaSonera'},
'467383':{'en': 'TeliaSonera'},
'467384':{'en': 'TeliaSonera'},
'467385':{'en': 'Telenor Sverige'},
'4673860':{'en': 'Telenor Sverige'},
'4673861':{'en': 'Telenor Sverige'},
'4673862':{'en': 'Telenor Sverige'},
'46738631':{'en': 'Beepsend'},
'46738632':{'en': 'Beepsend'},
'46738634':{'en': 'MERCURY INTERNATIONA'},
'46738635':{'en': 'MERCURY INTERNATIONA'},
'46738636':{'en': 'MERCURY INTERNATIONA'},
'46738637':{'en': 'MERCURY INTERNATIONA'},
'46738638':{'en': 'MERCURY INTERNATIONA'},
'46738639':{'en': 'MERCURY INTERNATIONA'},
'46738640':{'en': 'EU Tel AB'},
'46738641':{'en': 'iCentrex Sweden AB'},
'46738642':{'en': '42 Telecom AB'},
'46738643':{'en': 'Beepsend'},
'46738644':{'en': 'Beepsend'},
'46738645':{'en': 'Beepsend'},
'46738647':{'en': 'EU Tel AB'},
'46738651':{'en': 'MERCURY INTERNATIONA'},
'46738652':{'en': 'MERCURY INTERNATIONA'},
'46738653':{'en': 'MERCURY INTERNATIONA'},
'46738654':{'en': 'MERCURY INTERNATIONA'},
'46738655':{'en': 'MERCURY INTERNATIONA'},
'46738656':{'en': 'MERCURY INTERNATIONA'},
'46738657':{'en': 'MERCURY INTERNATIONA'},
'46738658':{'en': 'MERCURY INTERNATIONA'},
'46738659':{'en': 'MERCURY INTERNATIONA'},
'4673866':{'en': 'Tele2 Sverige'},
'4673867':{'en': 'Tele2 Sverige'},
'4673868':{'en': 'Tele2 Sverige'},
'46738691':{'en': 'MERCURY INTERNATIONA'},
'46738692':{'en': 'MERCURY INTERNATIONA'},
'46738693':{'en': 'MERCURY INTERNATIONA'},
'46738694':{'en': 'MERCURY INTERNATIONA'},
'46738695':{'en': 'MERCURY INTERNATIONA'},
'46738696':{'en': 'MERCURY INTERNATIONA'},
'46738697':{'en': 'MERCURY INTERNATIONA'},
'46738698':{'en': 'MERCURY INTERNATIONA'},
'46738699':{'en': 'MERCURY INTERNATIONA'},
'467387':{'en': 'Tele2 Sverige'},
'467388':{'en': 'Telenor Sverige'},
'467389':{'en': 'Tele2 Sverige'},
'46739':{'en': 'Tele2 Sverige'},
'467600':{'en': 'HI3G Access'},
'467601':{'en': 'HI3G Access'},
'467602':{'en': 'HI3G Access'},
'467603':{'en': 'HI3G Access'},
'467604':{'en': 'HI3G Access'},
'467605':{'en': 'Tele2 Sverige'},
'467606':{'en': 'Tele2 Sverige'},
'467607':{'en': 'Tele2 Sverige'},
'467608':{'en': 'Tele2 Sverige'},
'467609':{'en': 'Tele2 Sverige'},
'467610':{'en': 'TeliaSonera'},
'467611':{'en': 'TeliaSonera'},
'467612':{'en': 'TeliaSonera'},
'467613':{'en': 'TeliaSonera'},
'467614':{'en': 'TeliaSonera'},
'467615':{'en': 'Lycamobile Sweden'},
'467616':{'en': 'HI3G Access'},
'467617':{'en': 'HI3G Access'},
'467618':{'en': 'HI3G Access'},
'467619':{'en': 'HI3G Access'},
'46762':{'en': 'Tele2 Sverige'},
'46763':{'en': 'HI3G Access'},
'467635':{'en': 'Telenor Sverige'},
'467636':{'en': 'Telenor Sverige'},
'467637':{'en': 'Telenor Sverige'},
'467638':{'en': 'Easy Telecom AB (BILDNINGSAGENTEN 559)'},
'467640':{'en': 'Tele2 Sverige'},
'467641':{'en': 'Tele2 Sverige'},
'467642':{'en': 'Tele2 Sverige'},
'467643':{'en': 'Lycamobile Sweden'},
'467644':{'en': 'Lycamobile Sweden'},
'467645':{'en': 'Lycamobile Sweden'},
'4676460':{'en': 'Lycamobile Sweden'},
'4676461':{'en': 'Lycamobile Sweden'},
'4676462':{'en': 'Lycamobile Sweden'},
'4676463':{'en': 'Lycamobile Sweden'},
'4676464':{'en': 'Lycamobile Sweden'},
'46764651':{'en': 'EU Tel AB'},
'46764652':{'en': 'MERCURY INTERNATIONA'},
'46764653':{'en': 'MERCURY INTERNATIONA'},
'46764654':{'en': 'MERCURY INTERNATIONA'},
'46764655':{'en': 'MERCURY INTERNATIONA'},
'46764656':{'en': 'MERCURY INTERNATIONA'},
'46764657':{'en': 'MERCURY INTERNATIONA'},
'46764658':{'en': 'MERCURY INTERNATIONA'},
'46764659':{'en': 'MERCURY INTERNATIONA'},
'4676466':{'en': 'Gotalandsnatet'},
'4676467':{'en': 'MERCURY INTERNATIONA'},
'4676468':{'en': 'MERCURY INTERNATIONA'},
'4676469':{'en': 'MERCURY INTERNATIONA'},
'467647':{'en': 'Tele2 Sverige'},
'4676478':{'en': 'WIFOG AB'},
'4676479':{'en': 'Beepsend'},
'467648':{'en': 'GLOBETOUCH AB'},
'46764901':{'en': 'MERCURY INTERNATIONA'},
'46764902':{'en': 'MERCURY INTERNATIONA'},
'46764903':{'en': 'MERCURY INTERNATIONA'},
'46764904':{'en': 'MERCURY INTERNATIONA'},
'46764905':{'en': 'MERCURY INTERNATIONA'},
'46764906':{'en': 'MERCURY INTERNATIONA'},
'46764907':{'en': 'MERCURY INTERNATIONA'},
'46764908':{'en': 'MERCURY INTERNATIONA'},
'46764909':{'en': 'MERCURY INTERNATIONA'},
'4676492':{'en': 'Telavox AB'},
'46764940':{'en': 'Tele2 Sverige'},
'46764942':{'en': 'IPIFY LIMITED'},
'46764943':{'en': 'IPIFY LIMITED'},
'46764944':{'en': 'IPIFY LIMITED'},
'46764945':{'en': 'IPIFY LIMITED'},
'46764946':{'en': 'IPIFY LIMITED'},
'46764947':{'en': 'IPIFY LIMITED'},
'46764948':{'en': 'IPIFY LIMITED'},
'46764949':{'en': 'IPIFY LIMITED'},
'4676495':{'en': 'Tele2 Sverige'},
'4676496':{'en': 'Tele2 Sverige'},
'46764981':{'en': 'MERCURY INTERNATIONA'},
'46764982':{'en': 'MERCURY INTERNATIONA'},
'46764983':{'en': 'MERCURY INTERNATIONA'},
'46764984':{'en': 'MERCURY INTERNATIONA'},
'46764985':{'en': 'MERCURY INTERNATIONA'},
'46764986':{'en': 'MERCURY INTERNATIONA'},
'46764987':{'en': 'MERCURY INTERNATIONA'},
'46764988':{'en': 'MERCURY INTERNATIONA'},
'46764989':{'en': 'MERCURY INTERNATIONA'},
'46764990':{'en': 'Gotalandsnatet'},
'46764991':{'en': 'MERCURY INTERNATIONA'},
'46764992':{'en': 'MERCURY INTERNATIONA'},
'46764993':{'en': 'MERCURY INTERNATIONA'},
'46764994':{'en': 'MERCURY INTERNATIONA'},
'46764995':{'en': 'MERCURY INTERNATIONA'},
'46764996':{'en': 'MERCURY INTERNATIONA'},
'46764997':{'en': 'MERCURY INTERNATIONA'},
'46764998':{'en': 'MERCURY INTERNATIONA'},
'46765':{'en': 'Tele2 Sverige'},
'467660':{'en': 'Telenor Sverige'},
'467661':{'en': 'Telenor Sverige'},
'467662':{'en': 'Telenor Sverige'},
'467663':{'en': 'Telenor Sverige'},
'467664':{'en': 'Telenor Sverige'},
'467665':{'en': 'Tele2 Sverige'},
'4676660':{'en': 'NETETT SVERIGE AB (AINMT Sverige)'},
'4676661':{'en': 'NETETT SVERIGE AB (AINMT Sverige)'},
'4676662':{'en': 'NETETT SVERIGE AB (AINMT Sverige)'},
'4676663':{'en': 'NETETT SVERIGE AB (AINMT Sverige)'},
'4676664':{'en': 'NETETT SVERIGE AB (AINMT Sverige)'},
'4676665':{'en': 'NETETT SVERIGE AB (AINMT Sverige)'},
'4676666':{'en': u('\u00d6RETEL AB')},
'4676667':{'en': 'Unicorn Telecom'},
'4676668':{'en': 'MERCURY INTERNATIONA'},
'46766696':{'en': 'Telavox AB'},
'46766697':{'en': 'Telavox AB'},
'46766698':{'en': 'Telavox AB'},
'4676670':{'en': 'Svea Billing System'},
'4676671':{'en': 'Svea Billing System'},
'4676672':{'en': 'Svea Billing System'},
'4676673':{'en': 'Svea Billing System'},
'4676674':{'en': 'Svea Billing System'},
'46766750':{'en': '42 Telecom AB'},
'46766753':{'en': 'Beepsend'},
'46766754':{'en': 'Beepsend'},
'46766760':{'en': 'Voice Integrate'},
'4676677':{'en': 'Telavox AB'},
'4676678':{'en': 'SWEDFONENET AB'},
'46766791':{'en': 'Beepsend'},
'46766798':{'en': 'Beepsend'},
'46766799':{'en': '42 Telecom AB'},
'467668':{'en': 'Tele2 Sverige'},
'46766901':{'en': 'MERCURY INTERNATIONA'},
'46766902':{'en': 'MERCURY INTERNATIONA'},
'46766903':{'en': 'MERCURY INTERNATIONA'},
'46766904':{'en': 'MERCURY INTERNATIONA'},
'46766905':{'en': 'MERCURY INTERNATIONA'},
'46766906':{'en': 'MERCURY INTERNATIONA'},
'46766907':{'en': 'MERCURY INTERNATIONA'},
'46766908':{'en': 'MERCURY INTERNATIONA'},
'46766909':{'en': 'MERCURY INTERNATIONA'},
'46766911':{'en': 'MERCURY INTERNATIONA'},
'46766912':{'en': 'MERCURY INTERNATIONA'},
'46766913':{'en': 'MERCURY INTERNATIONA'},
'46766914':{'en': 'MERCURY INTERNATIONA'},
'46766915':{'en': 'MERCURY INTERNATIONA'},
'46766916':{'en': 'MERCURY INTERNATIONA'},
'46766917':{'en': 'MERCURY INTERNATIONA'},
'46766918':{'en': 'MERCURY INTERNATIONA'},
'46766919':{'en': 'MERCURY INTERNATIONA'},
'4676692':{'en': 'Voxbone'},
'46766930':{'en': 'MERCURY INTERNATIONA'},
'46766931':{'en': 'Beepsend'},
'46766932':{'en': 'IPIFY LIMITED'},
'46766933':{'en': 'Connectel AB'},
'46766934':{'en': 'IPIFY LIMITED'},
'46766935':{'en': 'Beepsend'},
'46766936':{'en': 'IPIFY LIMITED'},
'46766937':{'en': 'IPIFY LIMITED'},
'46766938':{'en': 'IPIFY LIMITED'},
'4676694':{'en': '42 Telecom AB'},
'4676695':{'en': 'Tele2 Sverige'},
'4676696':{'en': 'Tele2 Sverige'},
'4676697':{'en': 'Tele2 Sverige'},
'4676698':{'en': 'Tele2 Sverige'},
'4676699':{'en': 'Tele2 Sverige'},
'467670':{'en': 'Tele2 Sverige'},
'467671':{'en': 'Tele2 Sverige'},
'4676720':{'en': 'Tele2 Sverige'},
'4676721':{'en': 'Tele2 Sverige'},
'4676722':{'en': 'Tele2 Sverige'},
'4676723':{'en': 'Tele2 Sverige'},
'4676724':{'en': 'Tele2 Sverige'},
'4676725':{'en': 'Tele2 Sverige'},
'46767260':{'en': 'EU Tel AB'},
'46767261':{'en': 'Beepsend'},
'46767262':{'en': 'Beepsend'},
'46767265':{'en': 'HORISEN AG'},
'46767266':{'en': 'Beepsend'},
'46767268':{'en': 'Rebtel Networks'},
'4676727':{'en': 'Telenor Sverige'},
'467674':{'en': 'Lycamobile Sweden'},
'467675':{'en': 'Lycamobile Sweden'},
'467676':{'en': 'TeliaSonera'},
'467677':{'en': 'TeliaSonera'},
'467678':{'en': 'TeliaSonera'},
'467679':{'en': 'TeliaSonera'},
'467680':{'en': 'TeliaSonera'},
'467681':{'en': 'TeliaSonera'},
'467682':{'en': 'TeliaSonera'},
'467683':{'en': 'TeliaSonera'},
'467684':{'en': 'TeliaSonera'},
'467685':{'en': 'Telenor Sverige'},
'467686':{'en': 'Telenor Sverige'},
'467687':{'en': 'Telenor Sverige'},
'467688':{'en': 'Telenor Sverige'},
'467689':{'en': 'Telenor Sverige'},
'467690':{'en': 'Tele2 Sverige'},
'467691':{'en': 'Tele2 Sverige'},
'467692':{'en': 'Tele2 Sverige'},
'467693':{'en': 'Tele2 Sverige'},
'467694':{'en': 'Tele2 Sverige'},
'467695':{'en': 'Lycamobile Sweden'},
'467696':{'en': 'Lycamobile Sweden'},
'467697':{'en': 'Lycamobile Sweden'},
'467698':{'en': 'TeliaSonera'},
'467699':{'en': 'TeliaSonera'},
'4679000':{'en': '0700 LTD'},
'4679001':{'en': 'EU Tel AB'},
'4679002':{'en': '0700 LTD'},
'4679003':{'en': '0700 LTD'},
'4679004':{'en': '0700 LTD'},
'46790050':{'en': 'Telenor Sverige'},
'46790051':{'en': 'Telenor Sverige'},
'46790052':{'en': 'Telenor Sverige'},
'46790053':{'en': 'Telenor Sverige'},
'46790054':{'en': 'Telenor Sverige'},
'46790055':{'en': 'Telenor Sverige'},
'46790056':{'en': 'Telenor Sverige'},
'46790057':{'en': 'Telenor Sverige'},
'4679006':{'en': 'Telavox AB'},
'4679007':{'en': 'FONIA AB'},
'4679008':{'en': 'Voice Integrate'},
'4679009':{'en': 'BIZTELCO SVERIGE AB'},
'467901':{'en': 'Tele2 Sverige'},
'467902':{'en': 'Tele2 Sverige'},
'467903':{'en': 'Tele2 Sverige'},
'467904':{'en': 'Tele2 Sverige'},
'467905':{'en': 'Tele2 Sverige'},
'467906':{'en': 'Tele2 Sverige'},
'467907':{'en': 'Tele2 Sverige'},
'467908':{'en': 'Tele2 Sverige'},
'467909':{'en': 'Tele2 Sverige'},
'467910':{'en': 'TELL ESS AB'},
'467930':{'en': 'HI3G Access'},
'467931':{'en': 'HI3G Access'},
'467932':{'en': 'HI3G Access'},
'467933':{'en': 'HI3G Access'},
'467934':{'en': 'HI3G Access'},
'467950':{'en': 'JUNYVERSE AB'},
'467951':{'en': 'JUNYVERSE AB'},
'467952':{'en': 'JUNYVERSE AB'},
'467953':{'en': 'JUNYVERSE AB'},
'467954':{'en': 'JUNYVERSE AB'},
'4679580':{'en': 'Borderlight'},
'4679581':{'en': 'Borderlight'},
'4679585':{'en': 'Telavox AB'},
'467997':{'en': 'Telenor Sverige'},
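# End of country calling code 46 (Sweden) prefixes.
# Country calling code 47 (Norway) prefixes follow.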
'47400':{'en': 'telenor norge'},
'474000':{'en': 'telia'},
'474001':{'en': 'telia'},
'474002':{'en': 'telia'},
'474003':{'en': 'telia'},
'47401':{'en': 'telenor norge'},
'474010':{'en': 'telia'},
'474011':{'en': 'telia'},
'474014':{'en': 'nextgentel'},
'474020':{'en': 'telia'},
'474021':{'en': 'telia'},
'474022':{'en': 'telenor norge'},
'474023':{'en': 'telia'},
'474024':{'en': 'telia'},
'474025':{'en': 'sierra wireless'},
'474026':{'en': 'sierra wireless'},
'474027':{'en': 'sierra wireless'},
'474028':{'en': 'telenor norge'},
'474029':{'en': 'telia'},
'47403':{'en': 'telia'},
'474035':{'en': 'sierra wireless'},
'474036':{'en': 'sierra wireless'},
'474037':{'en': 'sierra wireless'},
'47404':{'en': 'telia'},
'47405':{'en': 'telia'},
'474060':{'en': 'telia'},
'474061':{'en': 'telia'},
'474062':{'en': 'telia'},
'474063':{'en': 'telia'},
'474064':{'en': 'telia'},
'474065':{'en': 'telia telecom solution'},
'474067':{'en': 'nextgentel'},
'474068':{'en': 'telenor norge'},
'474069':{'en': 'telenor norge'},
'47407':{'en': 'telia'},
'47408':{'en': 'telenor norge'},
'474080':{'en': 'telia telecom solution'},
'474081':{'en': 'telia telecom solution'},
'4740820':{'en': 'telia telecom solution'},
'4740821':{'en': 'telia telecom solution'},
'4740822':{'en': 'telia telecom solution'},
'4740823':{'en': 'telia telecom solution'},
'4740824':{'en': 'telia telecom solution'},
'47409':{'en': 'lyca mobile'},
'474090':{'en': 'telia telecom solution'},
'474091':{'en': 'telia telecom solution'},
'4740920':{'en': 'telia telecom solution'},
'4740921':{'en': 'telia telecom solution'},
'4740922':{'en': 'telia telecom solution'},
'4740923':{'en': 'telia telecom solution'},
'4740924':{'en': 'telia telecom solution'},
'4740925':{'en': 'telenor norge'},
'4740926':{'en': 'telenor norge'},
'4740927':{'en': 'telenor norge'},
'4740928':{'en': 'telenor norge'},
'4740929':{'en': 'telenor norge'},
'474093':{'en': 'telenor norge'},
'4741':{'en': 'telenor norge'},
'474100':{'en': 'telia'},
'474101':{'en': 'telia'},
'474104':{'en': 'telia'},
'474106':{'en': 'telia'},
'474107':{'en': 'telia'},
'474110':{'en': 'telia'},
'474111':{'en': 'chilimobil'},
'474112':{'en': 'chilimobil'},
'474113':{'en': 'chilimobil'},
'474114':{'en': 'telia'},
'474115':{'en': 'chilimobil'},
'474116':{'en': 'chilimobil'},
'474117':{'en': 'telia'},
'474118':{'en': 'telia'},
'474119':{'en': 'telia'},
'47412':{'en': 'telia'},
'47413':{'en': 'telia'},
'4745':{'en': 'telia'},
'47453':{'en': 'telenor norge'},
'474536':{'en': 'nkom (nasjonal kommunikasjonsmyndighet)'},
'474537':{'en': 'erate'},
'474538':{'en': 'erate'},
'47455':{'en': 'lyca mobile'},
'47458':{'en': 'telenor norge'},
'474590':{'en': 'telenor norge'},
'474592':{'en': 'lyca mobile'},
'474595':{'en': 'telenor norge'},
'474596':{'en': 'telenor norge'},
'474598':{'en': 'telenor norge'},
'474599':{'en': 'telenor norge'},
'47460':{'en': 'telenor norge'},
'47461':{'en': 'chilimobil'},
'474610':{'en': 'telenor norge'},
'474617':{'en': 'telenor norge'},
'474618':{'en': 'telenor norge'},
'474619':{'en': 'telenor norge'},
'47462':{'en': 'telia'},
'474620':{'en': 'telenor norge'},
'474628':{'en': 'erate'},
'474629':{'en': 'erate'},
'47463':{'en': 'telia'},
'47464':{'en': 'NetCom'},
'474650':{'en': 'telia'},
'474651':{'en': 'ice norge'},
'474652':{'en': 'ice norge'},
'474653':{'en': 'ice norge'},
'474654':{'en': 'telia'},
'474655':{'en': 'telia'},
'474656':{'en': 'telia'},
'474657':{'en': 'telia'},
'474658':{'en': 'telia'},
'474659':{'en': 'telia'},
'47466':{'en': 'telia'},
'474666':{'en': 'telenor norge'},
'474667':{'en': 'telenor norge'},
'474670':{'en': 'telia'},
'474671':{'en': 'lyca mobile'},
'474672':{'en': 'lyca mobile'},
'474674':{'en': 'telia'},
'474675':{'en': 'telia'},
'474676':{'en': 'telia'},
'474677':{'en': 'telia'},
'474678':{'en': 'telia'},
'474679':{'en': 'telia'},
'47468':{'en': 'telenor norge'},
'474690':{'en': 'telenor norge'},
'474691':{'en': 'telenor norge'},
'474692':{'en': 'telenor norge'},
'474693':{'en': 'telenor norge'},
'474694':{'en': 'telenor norge'},
'474695':{'en': 'telenor norge'},
'474696':{'en': 'telenor norge'},
'474697':{'en': 'telia'},
'474698':{'en': 'telenor norge'},
'47470':{'en': 'telenor norge'},
'474710':{'en': 'telenor norge'},
'474711':{'en': 'telenor norge'},
'474712':{'en': 'telenor norge'},
'474713':{'en': 'telia'},
'474714':{'en': 'telia'},
'474715':{'en': 'telia'},
'474716':{'en': 'telia'},
'474717':{'en': 'telia'},
'474718':{'en': 'chilimobil'},
'474719':{'en': 'chilimobil'},
'47472':{'en': 'telia'},
'47473':{'en': 'telia'},
'47474':{'en': 'telia'},
'474740':{'en': 'telenor norge'},
'474741':{'en': 'telenor norge'},
'474742':{'en': 'telenor norge'},
'474743':{'en': 'telenor norge'},
'47475':{'en': 'altibox'},
'474750':{'en': 'telenor norge'},
'474751':{'en': 'telenor norge'},
'47476':{'en': 'telenor norge'},
'474769':{'en': 'telia'},
'47477':{'en': 'telia'},
'474770':{'en': 'telenor norge'},
'474771':{'en': 'telenor norge'},
'474775':{'en': 'telenor norge'},
'474776':{'en': 'telenor norge'},
'47478':{'en': 'telenor norge'},
'47479':{'en': 'telia'},
'474790':{'en': 'telenor norge'},
'474798':{'en': 'telenor norge'},
'474799':{'en': 'telenor norge'},
'47480':{'en': 'telenor norge'},
'47481':{'en': 'telenor norge'},
'47482':{'en': 'telenor norge'},
'474830':{'en': 'telenor norge'},
'474831':{'en': 'telenor norge'},
'474832':{'en': 'telenor norge'},
'474833':{'en': 'telia'},
'474834':{'en': 'telia'},
'474835':{'en': 'telia'},
'474836':{'en': 'telia'},
'474838':{'en': 'ice norge'},
'474839':{'en': 'ice norge'},
'47484':{'en': 'telia'},
'474841':{'en': 'telenor norge'},
'474842':{'en': 'telenor norge'},
'474848':{'en': 'erate'},
'474849':{'en': 'erate'},
'474850':{'en': 'telia'},
'474851':{'en': 'nextgentel'},
'474858':{'en': 'telenor norge'},
'474859':{'en': 'erate'},
'474860':{'en': 'telia'},
'474861':{'en': 'telia'},
'474862':{'en': 'telia'},
'474863':{'en': 'telia'},
'474864':{'en': 'telia'},
'474865':{'en': 'telia'},
'474866':{'en': 'telia'},
'474867':{'en': 'telia'},
'474868':{'en': 'telia'},
'474884':{'en': 'telenor norge'},
'474885':{'en': 'telenor norge'},
'474886':{'en': 'telia'},
'474888':{'en': 'telia'},
'474889':{'en': 'telia'},
'474890':{'en': 'telenor norge'},
'474891':{'en': 'telenor norge'},
'474892':{'en': 'telenor norge'},
'474893':{'en': 'telia'},
'474894':{'en': 'telenor norge'},
'474895':{'en': 'telia'},
'474896':{'en': 'telenor norge'},
'474898':{'en': 'telenor norge'},
'474899':{'en': 'telia'},
'47591':{'en': 'telenor norge'},
'4790':{'en': 'telenor norge'},
'479042':{'en': 'svea billing services'},
'479043':{'en': 'svea billing services'},
'479044':{'en': 'svea billing services'},
'479048':{'en': 'telavox'},
'479049':{'en': 'telavox'},
'4791':{'en': 'telenor norge'},
'479120':{'en': 'chilimobil'},
'479121':{'en': 'chilimobil'},
'479122':{'en': 'chilimobil'},
'479123':{'en': 'chilimobil'},
'479125':{'en': 'lyca mobile'},
'479126':{'en': 'lyca mobile'},
'479127':{'en': 'lyca mobile'},
'479128':{'en': 'lyca mobile'},
'479129':{'en': 'lyca mobile'},
'4792':{'en': 'telia'},
'479218':{'en': 'telenor norge'},
'479219':{'en': 'telenor norge'},
'479236':{'en': 'telenor norge'},
'479238':{'en': 'telenor norge'},
'479239':{'en': 'telenor norge'},
'479258':{'en': 'telenor norge'},
'479259':{'en': 'telenor norge'},
'47927':{'en': 'telenor norge'},
'47929':{'en': 'telenor norge'},
'47930':{'en': 'telia'},
'479310':{'en': 'telenor norge'},
'479311':{'en': 'telenor norge'},
'479312':{'en': 'telenor norge'},
'479313':{'en': 'telenor norge'},
'479314':{'en': 'telenor norge'},
'479315':{'en': 'telenor norge'},
'479316':{'en': 'telenor norge'},
'479318':{'en': 'telenor norge'},
'479319':{'en': 'telenor norge'},
'47932':{'en': 'telia'},
'479330':{'en': 'telenor norge'},
'479331':{'en': 'telenor norge'},
'479332':{'en': 'telenor norge'},
'479333':{'en': 'telenor norge'},
'479334':{'en': 'telenor norge'},
'479335':{'en': 'telenor norge'},
'479336':{'en': 'telenor norge'},
'479337':{'en': 'telia'},
'479338':{'en': 'telenor norge'},
'479339':{'en': 'telenor norge'},
'47934':{'en': 'telia'},
'479350':{'en': 'telenor norge'},
'479351':{'en': 'telenor norge'},
'479352':{'en': 'telenor norge'},
'479353':{'en': 'telenor norge'},
'479354':{'en': 'telenor norge'},
'479355':{'en': 'telenor norge'},
'479356':{'en': 'telenor norge'},
'479357':{'en': 'telia'},
'479358':{'en': 'telenor norge'},
'479359':{'en': 'telenor norge'},
'47936':{'en': 'telia'},
'479370':{'en': 'telenor norge'},
'479371':{'en': 'telenor norge'},
'479372':{'en': 'telenor norge'},
'479373':{'en': 'telenor norge'},
'479374':{'en': 'telenor norge'},
'479375':{'en': 'telenor norge'},
'479376':{'en': 'telenor norge'},
'479377':{'en': 'telia'},
'479378':{'en': 'telenor norge'},
'479379':{'en': 'telenor norge'},
'47938':{'en': 'telia'},
'47939':{'en': 'telia'},
'479390':{'en': 'telenor norge'},
'479400':{'en': 'telia'},
'479401':{'en': 'telia'},
'479402':{'en': 'telia'},
'479403':{'en': 'telenor norge'},
'479404':{'en': 'com4'},
'479405':{'en': 'telenor norge'},
'479406':{'en': 'telenor norge'},
'479407':{'en': 'telenor norge'},
'479408':{'en': 'ice norge'},
'479409':{'en': 'ice norge'},
'47941':{'en': 'telenor norge'},
'479410':{'en': 'telia'},
'479411':{'en': 'telia'},
'479412':{'en': 'telia'},
'47942':{'en': 'telia'},
'47943':{'en': 'telenor norge'},
'479440':{'en': 'telenor norge'},
'479441':{'en': 'telenor norge'},
'479442':{'en': 'telia'},
'479443':{'en': 'telia'},
'479444':{'en': 'telenor norge'},
'479445':{'en': 'telenor norge'},
'479446':{'en': 'telenor norge'},
'479447':{'en': 'telia'},
'479448':{'en': 'telia'},
'479449':{'en': 'telia'},
'479450':{'en': 'telia telecom solution'},
'479451':{'en': 'telia telecom solution'},
'479452':{'en': 'telia telecom solution'},
'479453':{'en': 'telia telecom solution'},
'479454':{'en': 'telia telecom solution'},
'479471':{'en': 'lyca mobile'},
'479472':{'en': 'lyca mobile'},
'479473':{'en': 'lyca mobile'},
'479474':{'en': 'telenor norge'},
'479475':{'en': 'telenor norge'},
'479476':{'en': 'telenor norge'},
'479477':{'en': 'telenor norge'},
'479478':{'en': 'telenor norge'},
'479479':{'en': 'telenor norge'},
'47948':{'en': 'telenor norge'},
'47949':{'en': 'telenor norge'},
'479499':{'en': 'telia'},
'4795':{'en': 'telenor norge'},
'479600':{'en': 'phonect'},
'479601':{'en': 'telenor norge'},
'479604':{'en': 'telenor norge'},
'479609':{'en': 'telenor norge'},
'47961':{'en': 'telenor norge'},
'47962':{'en': 'telenor norge'},
'47965':{'en': 'telenor norge'},
'479660':{'en': 'erate'},
'479661':{'en': 'erate'},
'479662':{'en': 'erate'},
'479663':{'en': 'erate'},
'479664':{'en': 'erate'},
'479665':{'en': 'telia'},
'479666':{'en': 'telia'},
'479667':{'en': 'telia'},
'479668':{'en': 'telia'},
'479669':{'en': 'telia'},
'479670':{'en': 'telia'},
'479671':{'en': 'telia'},
'479672':{'en': 'telia'},
'479673':{'en': 'telia'},
'479674':{'en': 'telia'},
'479675':{'en': 'telia'},
'479679':{'en': 'telenor norge'},
'47968':{'en': 'telia'},
'479689':{'en': 'telenor norge'},
'479690':{'en': 'erate'},
'479691':{'en': 'erate'},
'479692':{'en': 'erate'},
'479693':{'en': 'telenor norge'},
'479694':{'en': 'telia'},
'479695':{'en': 'lyca mobile'},
'479696':{'en': 'lyca mobile'},
'479697':{'en': 'lyca mobile'},
'479698':{'en': 'lyca mobile'},
'479699':{'en': 'lyca mobile'},
'4797':{'en': 'telenor norge'},
'479730':{'en': 'ice norge'},
'479731':{'en': 'ice norge'},
'479735':{'en': 'lyca mobile'},
'479736':{'en': 'lyca mobile'},
'479737':{'en': 'lyca mobile'},
'479738':{'en': 'lyca mobile'},
'479739':{'en': 'lyca mobile'},
'47978':{'en': 'telia'},
'479790':{'en': 'telia'},
'479791':{'en': 'telia'},
'479792':{'en': 'telia'},
'479793':{'en': 'telia'},
'479794':{'en': 'telia'},
'47980':{'en': 'telia'},
'47981':{'en': 'telia'},
'47982':{'en': 'telia'},
'47983':{'en': 'telia'},
'479838':{'en': 'telenor norge'},
'479839':{'en': 'telenor norge'},
'47984':{'en': 'telia'},
'47985':{'en': 'telenor norge'},
'479854':{'en': 'telia'},
'47986':{'en': 'telia'},
'479870':{'en': 'kvantel'},
'479876':{'en': 'telia'},
'479877':{'en': 'chilimobil'},
'47988':{'en': 'telia'},
'47989':{'en': 'telenor norge'},
'479890':{'en': 'telia'},
'479899':{'en': 'telia'},
'47990':{'en': 'telenor norge'},
'479908':{'en': 'telia'},
'479909':{'en': 'telia'},
'47991':{'en': 'telenor norge'},
'47992':{'en': 'telenor norge'},
'47993':{'en': 'telenor norge'},
'47994':{'en': 'telenor norge'},
'47995':{'en': 'telenor norge'},
'47996':{'en': 'telenor norge'},
'479967':{'en': 'telia'},
'479968':{'en': 'telia'},
'47997':{'en': 'telenor norge'},
'479980':{'en': 'telenor norge'},
'479981':{'en': 'telenor norge'},
'479982':{'en': 'telenor norge'},
'479983':{'en': 'telenor norge'},
'479984':{'en': 'telenor norge'},
'479985':{'en': 'telia'},
'479986':{'en': 'telia'},
'479987':{'en': 'telia'},
'479988':{'en': 'telia'},
'479989':{'en': 'telia'},
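# End of country calling code 47 (Norway) prefixes.
# Country calling code 48 (Poland) prefixes follow.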
'4845':{'en': 'Rezerwa Prezesa UKE'},
'48450':{'en': 'Play'},
'484590':{'en': 'Play'},
'4845910':{'en': 'Play'},
'4845911':{'en': 'Play'},
'4845912':{'en': 'Play'},
'4845913':{'en': 'Play'},
'4845914':{'en': 'Play'},
'4845920':{'en': 'SIA Ntel Solutions'},
'484593':{'en': 'Play'},
'4845945':{'en': 'Plus'},
'484595':{'en': 'Plus'},
'4845950':{'en': 'SIA Ntel Solutions'},
'4845957':{'en': 'BSG ESTONIA OU'},
'4845958':{'en': 'TELESTRADA S.A.'},
'4845959':{'en': 'TELESTRADA S.A.'},
'484598':{'en': 'Plus'},
'4850':{'en': 'Orange'},
'4851':{'en': 'Orange'},
'4853':{'en': 'Play'},
'48532':{'en': 'T-Mobile'},
'485366':{'en': 'Plus'},
'48538':{'en': 'T-Mobile'},
'48539':{'en': 'T-Mobile'},
'4857':{'en': 'Play'},
'48571':{'en': 'Orange'},
'485717':{'en': 'Rezerwa Prezesa UKE'},
'485718':{'en': 'Rezerwa Prezesa UKE'},
'485719':{'en': 'Rezerwa Prezesa UKE'},
'48572':{'en': 'Orange'},
'48573':{'en': 'Orange'},
'485735':{'en': 'Rezerwa Prezesa UKE'},
'485736':{'en': 'Rezerwa Prezesa UKE'},
'485737':{'en': 'Rezerwa Prezesa UKE'},
'485738':{'en': 'Rezerwa Prezesa UKE'},
'485791':{'en': 'Plus'},
'485792':{'en': 'Plus'},
'485793':{'en': 'Plus'},
'4857941':{'en': 'Messagebird B.V.'},
'4857942':{'en': 'SIA NetBalt'},
'4857946':{'en': 'Plus'},
'4857947':{'en': 'Plus'},
'4857948':{'en': 'SIA Ntel Solutions'},
'4857949':{'en': 'Plus'},
'4857950':{'en': 'Plus'},
'4857953':{'en': 'SIA NetBalt'},
'4857958':{'en': 'NIMBUSFIVE GmbH'},
'485797':{'en': 'Plus'},
'48600':{'en': 'T-Mobile'},
'48601':{'en': 'Plus'},
'48602':{'en': 'T-Mobile'},
'48603':{'en': 'Plus'},
'48604':{'en': 'T-Mobile'},
'48605':{'en': 'Plus'},
'48606':{'en': 'T-Mobile'},
'48607':{'en': 'Plus'},
'48608':{'en': 'T-Mobile'},
'48609':{'en': 'Plus'},
'48660':{'en': 'T-Mobile'},
'48661':{'en': 'Plus'},
'48662':{'en': 'T-Mobile'},
'48663':{'en': 'Plus'},
'48664':{'en': 'T-Mobile'},
'48665':{'en': 'Plus'},
'48666':{'en': 'T-Mobile'},
'486666':{'en': 'Play'},
'48667':{'en': 'Plus'},
'48668':{'en': 'T-Mobile'},
'48669':{'en': 'Plus'},
'48690':{'en': 'Orange'},
'486900':{'en': 'Play'},
'486907':{'en': 'Play'},
'486908':{'en': 'Play'},
'486909':{'en': 'Play'},
'48691':{'en': 'Plus'},
'48692':{'en': 'T-Mobile'},
'48693':{'en': 'Plus'},
'48694':{'en': 'T-Mobile'},
'48695':{'en': 'Plus'},
'48696':{'en': 'T-Mobile'},
'48697':{'en': 'Plus'},
'48698':{'en': 'T-Mobile'},
'48699':{'en': 'Plus'},
'4869901':{'en': 'AMD Telecom S.A.'},
'4869922':{'en': 'Play'},
'4869950':{'en': 'AMD Telecom S.A.'},
'4869951':{'en': 'Mobiledata Sp. z o.o.'},
'4869952':{'en': 'Mobiledata Sp. z o.o.'},
'4869953':{'en': 'Mobiledata Sp. z o.o.'},
'4869954':{'en': 'Mobiledata Sp. z o.o.'},
'4869955':{'en': 'Mobiledata Sp. z o.o.'},
'4869956':{'en': 'Twilio Ireland Limited'},
'4869957':{'en': 'Softelnet S.A. Sp. k.'},
'4869958':{'en': 'Rezerwa Prezesa UKE'},
'4869959':{'en': 'Move Telecom S.A.'},
'4869960':{'en': 'Play'},
'4869970':{'en': 'Play'},
'4869974':{'en': 'Compatel Limited'},
'4869978':{'en': 'VOXBONE SA'},
'4869979':{'en': 'Play'},
'486998':{'en': 'Play'},
'4872':{'en': 'Plus'},
'487208':{'en': 'Play'},
'487271':{'en': 'Nordisk'},
'487272':{'en': 'T-Mobile'},
'487273':{'en': 'T-Mobile'},
'48728':{'en': 'T-Mobile'},
'487290':{'en': 'Play'},
'487291':{'en': 'Play'},
'4872970':{'en': 'AMD Telecom S.A.'},
'4872972':{'en': 'Compatel Limited'},
'4872973':{'en': 'Play'},
'4872974':{'en': 'Play'},
'4872975':{'en': 'Rezerwa Prezesa UKE'},
'4872977':{'en': 'INTERNETIA Sp. o.o.'},
'4872978':{'en': 'Play'},
'4872979':{'en': 'Play'},
'4872980':{'en': 'Play'},
'4872981':{'en': 'Play'},
'4872982':{'en': 'Play'},
'4872983':{'en': 'Rezerwa Prezesa UKE'},
'4872984':{'en': 'Rezerwa Prezesa UKE'},
'4872985':{'en': 'Rezerwa Prezesa UKE'},
'4872986':{'en': 'Rezerwa Prezesa UKE'},
'4872987':{'en': 'Premium Mobile SA'},
'4872988':{'en': 'Premium Mobile SA'},
'4872989':{'en': 'Premium Mobile SA'},
'4872990':{'en': 'TELCO LEADERS LTD'},
'48730':{'en': 'Play'},
'48731':{'en': 'Play'},
'48732':{'en': 'Play'},
'48733':{'en': 'Play'},
'48734':{'en': 'T-Mobile'},
'48735':{'en': 'T-Mobile'},
'48736':{'en': 'T-Mobile'},
'487360':{'en': 'Play'},
'487367':{'en': 'Play'},
'487368':{'en': 'Play'},
'487369':{'en': 'Play'},
'48737':{'en': 'Play'},
'487370':{'en': 'Plus'},
'487371':{'en': 'Plus'},
'487372':{'en': 'Plus'},
'48738':{'en': 'PKP Polskie Linie Kolejowe S.A.'},
'48739':{'en': 'Plus'},
'487390':{'en': 'Play'},
'487391':{'en': 'Play'},
'487392':{'en': 'Play'},
'4873930':{'en': 'Play'},
'4873990':{'en': 'Play'},
'4873991':{'en': 'AGILE TELECOM POLAND'},
'4873992':{'en': 'MobiWeb Telecom Limited'},
'4873993':{'en': 'SIA NetBalt'},
'4873997':{'en': 'Play'},
'4873998':{'en': 'Play'},
'4873999':{'en': 'Play'},
'487800':{'en': 'Orange'},
'487801':{'en': 'Orange'},
'487802':{'en': 'Play'},
'4878020':{'en': 'Plus'},
'4878025':{'en': 'Interactive Digital Media GmbH'},
'4878026':{'en': 'SIA NetBalt'},
'4878029':{'en': 'SMSHIGHWAY LIMITED'},
'487803':{'en': 'T-Mobile'},
'487804':{'en': 'Rezerwa Prezesa UKE'},
'4878040':{'en': 'Plus'},
'487805':{'en': 'Orange'},
'487806':{'en': 'Orange'},
'487807':{'en': 'Play'},
'487808':{'en': 'Play'},
'487809':{'en': 'Rezerwa Prezesa UKE'},
'48781':{'en': 'Plus'},
'48782':{'en': 'Plus'},
'48783':{'en': 'Plus'},
'48784':{'en': 'T-Mobile'},
'48785':{'en': 'Plus'},
'487860':{'en': 'Plus'},
'4878607':{'en': 'Play'},
'4878608':{'en': 'Play'},
'487861':{'en': 'Play'},
'487862':{'en': 'Play'},
'487863':{'en': 'Play'},
'487864':{'en': 'Play'},
'487865':{'en': 'Rezerwa Prezesa UKE'},
'487866':{'en': 'Rezerwa Prezesa UKE'},
'487867':{'en': 'Rezerwa Prezesa UKE'},
'4878678':{'en': 'Play'},
'487868':{'en': 'Orange'},
'487869':{'en': 'Orange'},
'48787':{'en': 'T-Mobile'},
'48788':{'en': 'T-Mobile'},
'487890':{'en': 'Orange'},
'487891':{'en': 'Orange'},
'487892':{'en': 'Orange'},
'487893':{'en': 'Orange'},
'487894':{'en': 'Orange'},
'487895':{'en': 'Plus'},
'487896':{'en': 'Plus'},
'487897':{'en': 'Plus'},
'487898':{'en': 'Plus'},
'487899':{'en': 'Plus'},
'4879':{'en': 'Play'},
'487951':{'en': 'T-Mobile'},
'487952':{'en': 'T-Mobile'},
'487953':{'en': 'T-Mobile'},
'487954':{'en': 'T-Mobile'},
'487955':{'en': 'T-Mobile'},
'48797':{'en': 'Orange'},
'48798':{'en': 'Orange'},
'487990':{'en': 'Orange'},
'487996':{'en': 'Orange'},
'48880':{'en': 'T-Mobile'},
'48881':{'en': 'Play'},
'488810':{'en': 'T-Mobile'},
'488811':{'en': 'Plus'},
'488818':{'en': 'T-Mobile'},
'488819':{'en': 'T-Mobile'},
'48882':{'en': 'T-Mobile'},
'48883':{'en': 'Play'},
'488833':{'en': 'T-Mobile'},
'488838':{'en': 'T-Mobile'},
'48884':{'en': 'Play'},
'488841':{'en': 'T-Mobile'},
'488842':{'en': 'T-Mobile'},
'488844':{'en': 'Plus'},
'488845':{'en': 'Rezerwa Prezesa UKE'},
'48885':{'en': 'Plus'},
'48886':{'en': 'T-Mobile'},
'48887':{'en': 'Plus'},
'48888':{'en': 'T-Mobile'},
'48889':{'en': 'T-Mobile'},
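# End of country calling code 48 (Poland) prefixes.
# Country calling code 49 (Germany) mobile prefixes follow.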
'4915020':{'en': 'Interactive digital media'},
'4915050':{'en': 'NAKA AG'},
'4915080':{'en': 'Easy World'},
'49151':{'en': 'T-Mobile'},
'491520':{'en': 'Vodafone'},
'491521':{'en': 'Vodafone/Lycamobile'},
'491522':{'en': 'Vodafone'},
'491523':{'en': 'Vodafone'},
'491525':{'en': 'Vodafone'},
'491526':{'en': 'Vodafone'},
'491529':{'en': 'Vodafone/Truphone'},
'4915555':{'en': 'Tismi BV'},
'4915566':{'en': 'Drillisch Online'},
'4915630':{'en': 'Multiconnect'},
'4915678':{'en': 'Argon Networks'},
'491570':{'en': 'Eplus/Telogic'},
'491573':{'en': 'Eplus'},
'491575':{'en': 'Eplus'},
'491577':{'en': 'Eplus'},
'491578':{'en': 'Eplus'},
'491579':{'en': 'Eplus/Sipgate'},
'4915888':{'en': 'TelcoVillage'},
'491590':{'en': 'O2'},
'49160':{'en': 'T-Mobile'},
'49162':{'en': 'Vodafone'},
'49163':{'en': 'Eplus'},
'49170':{'en': 'T-Mobile'},
'49171':{'en': 'T-Mobile'},
'49172':{'en': 'Vodafone'},
'49173':{'en': 'Vodafone'},
'49174':{'en': 'Vodafone'},
'49175':{'en': 'T-Mobile'},
'49176':{'en': 'O2'},
'49177':{'en': 'Eplus'},
'49178':{'en': 'Eplus'},
'49179':{'en': 'O2'},
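# End of country calling code 49 (Germany) prefixes.
# Country calling code 500 (Falkland Islands) prefixes follow.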
'5005':{'en': 'Sure South Atlantic Limited'},
'5006':{'en': 'Sure South Atlantic Limited'},
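# Country calling code 501 (Belize) prefixes follow.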
'50160':{'en': 'Belize Telemedia Ltd (Digi)'},
'50161':{'en': 'Belize Telemedia Ltd (Digi)'},
'50162':{'en': 'Belize Telemedia Ltd (Digi)'},
'50163':{'en': 'Belize Telemedia Ltd (Digi)'},
'50165':{'en': 'Speednet (Smart)'},
'50166':{'en': 'Speednet (Smart)'},
'50167':{'en': 'Speednet (Smart)'},
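# Country calling code 502 (Guatemala) prefixes follow.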
'50230':{'en': 'Tigo'},
'50231':{'en': 'Tigo'},
'50232':{'en': 'Tigo'},
'5023229':{'en': 'Telgua'},
'50233':{'en': 'Tigo'},
'50234':{'en': 'Movistar'},
'502350':{'en': 'Movistar'},
'502351':{'en': 'Movistar'},
'502352':{'en': 'Movistar'},
'502353':{'en': 'Movistar'},
'502354':{'en': 'Movistar'},
'502355':{'en': 'Movistar'},
'502356':{'en': 'Movistar'},
'502370':{'en': 'Tigo'},
'502371':{'en': 'Tigo'},
'502372':{'en': 'Tigo'},
'502373':{'en': 'Tigo'},
'502374':{'en': 'Tigo'},
'50240':{'en': 'Tigo'},
'502400':{'en': 'Movistar'},
'50241':{'en': 'Telgua'},
'50242':{'en': 'Telgua'},
'50243':{'en': 'Movistar'},
'50244':{'en': 'Movistar'},
'5024476':{'en': 'Tigo'},
'5024477':{'en': 'Tigo'},
'5024478':{'en': 'Tigo'},
'5024479':{'en': 'Tigo'},
'502448':{'en': 'Tigo'},
'502449':{'en': 'Tigo'},
'50245':{'en': 'Tigo'},
'50246':{'en': 'Tigo'},
'50247':{'en': 'Telgua'},
'502477':{'en': 'Tigo'},
'502478':{'en': 'Tigo'},
'502479':{'en': 'Tigo'},
'50248':{'en': 'Tigo'},
'50249':{'en': 'Tigo'},
'502500':{'en': 'Tigo'},
'502501':{'en': 'Telgua'},
'502502':{'en': 'Movistar'},
'502503':{'en': 'Tigo'},
'502504':{'en': 'Tigo'},
'502505':{'en': 'Tigo'},
'502506':{'en': 'Tigo'},
'502507':{'en': 'Movistar'},
'502508':{'en': 'Movistar'},
'502509':{'en': 'Movistar'},
'502510':{'en': 'Movistar'},
'502511':{'en': 'Telgua'},
'502512':{'en': 'Telgua'},
'502513':{'en': 'Telgua'},
'502514':{'en': 'Movistar'},
'502515':{'en': 'Tigo'},
'502516':{'en': 'Tigo'},
'502517':{'en': 'Tigo'},
'502518':{'en': 'Tigo'},
'502519':{'en': 'Tigo'},
'50252':{'en': 'Movistar'},
'502520':{'en': 'Tigo'},
'50253':{'en': 'Tigo'},
'5025310':{'en': 'Telgua'},
'5025311':{'en': 'Telgua'},
'5025312':{'en': 'Movistar'},
'5025313':{'en': 'Movistar'},
'502539':{'en': 'Movistar'},
'50254':{'en': 'Telgua'},
'502540':{'en': 'Movistar'},
'502550':{'en': 'Movistar'},
'502551':{'en': 'Telgua'},
'5025518':{'en': 'Movistar'},
'5025519':{'en': 'Movistar'},
'502552':{'en': 'Tigo'},
'5025531':{'en': 'Telgua'},
'5025532':{'en': 'Telgua'},
'5025533':{'en': 'Telgua'},
'5025534':{'en': 'Telgua'},
'5025535':{'en': 'Telgua'},
'5025536':{'en': 'Telgua'},
'5025537':{'en': 'Telgua'},
'5025538':{'en': 'Telgua'},
'5025539':{'en': 'Telgua'},
'502554':{'en': 'Movistar'},
'5025543':{'en': 'Telgua'},
'5025544':{'en': 'Telgua'},
'502555':{'en': 'Telgua'},
'5025550':{'en': 'Tigo'},
'5025551':{'en': 'Tigo'},
'5025552':{'en': 'Tigo'},
'5025553':{'en': 'Tigo'},
'502556':{'en': 'Telgua'},
'502557':{'en': 'Telgua'},
'502558':{'en': 'Telgua'},
'5025580':{'en': 'Tigo'},
'5025581':{'en': 'Tigo'},
'502559':{'en': 'Telgua'},
'50256':{'en': 'Movistar'},
'502561':{'en': 'Telgua'},
'502562':{'en': 'Telgua'},
'502563':{'en': 'Telgua'},
'502569':{'en': 'Telgua'},
'50257':{'en': 'Tigo'},
'502571':{'en': 'Telgua'},
'502579':{'en': 'Movistar'},
'50258':{'en': 'Telgua'},
'502580':{'en': 'Tigo'},
'5025819':{'en': 'Tigo'},
'502588':{'en': 'Tigo'},
'502589':{'en': 'Tigo'},
'50259':{'en': 'Telgua'},
'502590':{'en': 'Tigo'},
'5025915':{'en': 'Movistar'},
'5025916':{'en': 'Movistar'},
'5025917':{'en': 'Movistar'},
'5025918':{'en': 'Tigo'},
'5025919':{'en': 'Tigo'},
'502599':{'en': 'Tigo'},
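# Country calling code 503 (El Salvador) prefixes follow.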
'503600':{'en': 'Tigo'},
'503601':{'en': 'Tigo'},
'503602':{'en': 'Tigo'},
'503603':{'en': 'Tigo'},
'503604':{'en': 'Tigo'},
'503605':{'en': 'Tigo'},
'503609':{'en': 'Tigo'},
'50361':{'en': 'Movistar'},
'503620':{'en': 'Digicel'},
'503630':{'en': 'Claro'},
'5036310':{'en': 'Claro'},
'5036311':{'en': 'Claro'},
'5036312':{'en': 'Claro'},
'5036313':{'en': 'Claro'},
'5036314':{'en': 'Claro'},
'5036315':{'en': 'Claro'},
'5036316':{'en': 'Claro'},
'50363170':{'en': 'Claro'},
'50363171':{'en': 'Claro'},
'50363172':{'en': 'Claro'},
'50363173':{'en': 'Claro'},
'50363174':{'en': 'Claro'},
'503642':{'en': 'Movistar'},
'5036430':{'en': 'Movistar'},
'5036431':{'en': 'Movistar'},
'5036611':{'en': 'Movistar'},
'503700':{'en': 'Claro'},
'503701':{'en': 'Claro'},
'503702':{'en': 'Claro'},
'503703':{'en': 'Claro'},
'503704':{'en': 'Claro'},
'503705':{'en': 'Claro'},
'503706':{'en': 'Claro'},
'50370700':{'en': 'Claro'},
'50370701':{'en': 'Tigo'},
'50370702':{'en': 'Movistar'},
'50370703':{'en': 'Claro'},
'50370704':{'en': 'Claro'},
'50370705':{'en': 'Claro'},
'50370706':{'en': 'Tigo'},
'50370707':{'en': 'Claro'},
'50370708':{'en': 'Movistar'},
'50370709':{'en': 'Tigo'},
'50370710':{'en': 'Claro'},
'50370711':{'en': 'Movistar'},
'50370712':{'en': 'Claro'},
'50370713':{'en': 'Tigo'},
'50370714':{'en': 'Tigo'},
'50370715':{'en': 'Tigo'},
'50370716':{'en': 'Movistar'},
'50370717':{'en': 'Claro'},
'50370719':{'en': 'Tigo'},
'5037072':{'en': 'Digicel'},
'50370730':{'en': 'Digicel'},
'50370731':{'en': 'Digicel'},
'50370732':{'en': 'Digicel'},
'50370733':{'en': 'Digicel'},
'50370734':{'en': 'Digicel'},
'50370735':{'en': 'Claro'},
'50370736':{'en': 'Claro'},
'50370737':{'en': 'Claro'},
'50370738':{'en': 'Claro'},
'50370739':{'en': 'Claro'},
'50370740':{'en': 'Claro'},
'50370741':{'en': 'Claro'},
'50370742':{'en': 'Claro'},
'50370743':{'en': 'Claro'},
'50370744':{'en': 'Claro'},
'50370745':{'en': 'Claro'},
'50370746':{'en': 'Claro'},
'503708':{'en': 'Claro'},
'503709':{'en': 'Claro'},
'50371':{'en': 'Movistar'},
'50372':{'en': 'Tigo'},
'50373':{'en': 'Digicel'},
'50374':{'en': 'Digicel'},
'503745':{'en': 'Movistar'},
'503747':{'en': 'Tigo'},
'503748':{'en': 'Tigo'},
'503749':{'en': 'Tigo'},
'50375':{'en': 'Tigo'},
'50376':{'en': 'Claro'},
'503767':{'en': 'Tigo'},
'503768':{'en': 'Tigo'},
'50376865':{'en': 'Movistar'},
'50376866':{'en': 'Movistar'},
'50376867':{'en': 'Movistar'},
'50376868':{'en': 'Movistar'},
'50376869':{'en': 'Movistar'},
'5037691':{'en': 'Movistar'},
'5037692':{'en': 'Movistar'},
'5037693':{'en': 'Movistar'},
'5037694':{'en': 'Movistar'},
'5037695':{'en': 'Digicel'},
'5037696':{'en': 'Digicel'},
'5037697':{'en': 'Digicel'},
'5037698':{'en': 'Digicel'},
'5037699':{'en': 'Movistar'},
'503770':{'en': 'Movistar'},
'503771':{'en': 'Movistar'},
'503772':{'en': 'Tigo'},
'503773':{'en': 'Tigo'},
'503774':{'en': 'Claro'},
'503775':{'en': 'Claro'},
'503776':{'en': 'Digicel'},
'503777':{'en': 'Digicel'},
'5037780':{'en': 'Movistar'},
'5037781':{'en': 'Movistar'},
'5037782':{'en': 'Movistar'},
'5037783':{'en': 'Movistar'},
'5037784':{'en': 'Movistar'},
'5037785':{'en': 'Tigo'},
'5037786':{'en': 'Tigo'},
'5037787':{'en': 'Tigo'},
'5037788':{'en': 'Tigo'},
'5037789':{'en': 'Tigo'},
'5037790':{'en': 'Movistar'},
'5037791':{'en': 'Movistar'},
'5037792':{'en': 'Movistar'},
'5037793':{'en': 'Movistar'},
'5037794':{'en': 'Movistar'},
'5037795':{'en': 'Tigo'},
'5037796':{'en': 'Tigo'},
'5037797':{'en': 'Tigo'},
'5037798':{'en': 'Tigo'},
'5037799':{'en': 'Tigo'},
'5037800':{'en': 'Movistar'},
'5037801':{'en': 'Digicel'},
'50378020':{'en': 'Digicel'},
'50378021':{'en': 'Digicel'},
'50378022':{'en': 'Digicel'},
'50378023':{'en': 'Digicel'},
'50378024':{'en': 'Digicel'},
'50378025':{'en': 'Claro'},
'50378026':{'en': 'Claro'},
'50378027':{'en': 'Claro'},
'50378028':{'en': 'Claro'},
'50378029':{'en': 'Claro'},
'5037803':{'en': 'Claro'},
'5037805':{'en': 'Claro'},
'5037806':{'en': 'Claro'},
'5037807':{'en': 'Claro'},
'5037808':{'en': 'Claro'},
'5037809':{'en': 'Claro'},
'503781':{'en': 'Movistar'},
'503782':{'en': 'Movistar'},
'503783':{'en': 'Movistar'},
'5037840':{'en': 'Claro'},
'5037841':{'en': 'Claro'},
'5037842':{'en': 'Claro'},
'5037843':{'en': 'Claro'},
'5037844':{'en': 'Claro'},
'5037845':{'en': 'Movistar'},
'5037846':{'en': 'Movistar'},
'5037847':{'en': 'Movistar'},
'5037848':{'en': 'Movistar'},
'5037849':{'en': 'Movistar'},
'503785':{'en': 'Claro'},
'503786':{'en': 'Claro'},
'503787':{'en': 'Tigo'},
'503788':{'en': 'Tigo'},
'503789':{'en': 'Tigo'},
'503790':{'en': 'Tigo'},
'503791':{'en': 'Tigo'},
'503792':{'en': 'Tigo'},
'503793':{'en': 'Tigo'},
'503794':{'en': 'Tigo'},
'503795':{'en': 'Claro'},
'503796':{'en': 'Claro'},
'503797':{'en': 'Digicel'},
'5037980':{'en': 'Intelfon'},
'5037981':{'en': 'Intelfon'},
'5037982':{'en': 'Intelfon'},
'5037983':{'en': 'Intelfon'},
'5037984':{'en': 'Intelfon'},
'5037985':{'en': 'Claro'},
'5037986':{'en': 'Claro'},
'5037987':{'en': 'Claro'},
'5037988':{'en': 'Claro'},
'5037989':{'en': 'Claro'},
'503799':{'en': 'Movistar'},
'5043':{'en': 'Sercom (Claro)'},
'5047':{'en': 'HONDUTEL'},
'5048':{'en': 'Digicel Honduras'},
'5049':{'en': 'Celtel (Tigo)'},
'5055':{'en': 'Claro'},
'5056':{'en': 'CooTel'},
'5057':{'en': 'Movistar'},
'50581':{'en': 'Movistar'},
'50582':{'en': 'Movistar'},
'505820':{'en': 'Claro'},
'505821':{'en': 'Claro'},
'505822':{'en': 'Claro'},
'505823':{'en': 'Claro'},
'505832':{'en': 'Movistar'},
'505833':{'en': 'Claro'},
'505835':{'en': 'Claro'},
'505836':{'en': 'Claro'},
'505837':{'en': 'Movistar'},
'505838':{'en': 'Movistar'},
'505839':{'en': 'Movistar'},
'50584':{'en': 'Claro'},
'505845':{'en': 'Movistar'},
'505846':{'en': 'Movistar'},
'505847':{'en': 'Movistar'},
'505848':{'en': 'Movistar'},
'505850':{'en': 'Claro'},
'505851':{'en': 'Claro'},
'505852':{'en': 'Claro'},
'505853':{'en': 'Claro'},
'505854':{'en': 'Claro'},
'505855':{'en': 'Movistar'},
'505856':{'en': 'Movistar'},
'505857':{'en': 'Movistar'},
'505858':{'en': 'Movistar'},
'505859':{'en': 'Movistar'},
'50586':{'en': 'Claro'},
'505867':{'en': 'Movistar'},
'505868':{'en': 'Movistar'},
'505870':{'en': 'Claro'},
'505871':{'en': 'Claro'},
'505872':{'en': 'Claro'},
'505873':{'en': 'Claro'},
'505874':{'en': 'Claro'},
'505875':{'en': 'Movistar'},
'505876':{'en': 'Movistar'},
'505877':{'en': 'Movistar'},
'505878':{'en': 'Movistar'},
'505879':{'en': 'Movistar'},
'50588':{'en': 'Movistar'},
'505882':{'en': 'Claro'},
'505883':{'en': 'Claro'},
'505884':{'en': 'Claro'},
'505885':{'en': 'Claro'},
'505890':{'en': 'Claro'},
'505891':{'en': 'Claro'},
'505892':{'en': 'Claro'},
'505893':{'en': 'Claro'},
'505894':{'en': 'Claro'},
'505895':{'en': 'Movistar'},
'505896':{'en': 'Movistar'},
'505897':{'en': 'Movistar'},
'505898':{'en': 'Movistar'},
'505899':{'en': 'Movistar'},
'5063':{'en': 'Kolbi ICE'},
'50650':{'en': 'Kolbi ICE'},
'50657':{'en': 'Kolbi ICE'},
'5066':{'en': 'Movistar'},
'5067000':{'en': 'Claro'},
'50670010':{'en': 'Claro'},
'50670011':{'en': 'Claro'},
'50670012':{'en': 'Claro'},
'50670013':{'en': 'Claro'},
'50670014':{'en': 'Claro'},
'5067002':{'en': 'Claro'},
'5067003':{'en': 'Claro'},
'5067004':{'en': 'Claro'},
'5067005':{'en': 'Claro'},
'5067006':{'en': 'Claro'},
'5067007':{'en': 'Claro'},
'5067008':{'en': 'Claro'},
'5067009':{'en': 'Claro'},
'506701':{'en': 'Claro'},
'506702':{'en': 'Claro'},
'506703':{'en': 'Claro'},
'506704':{'en': 'Claro'},
'506705':{'en': 'Claro'},
'506706':{'en': 'Claro'},
'506707':{'en': 'Claro'},
'506708':{'en': 'Claro'},
'506709':{'en': 'Claro'},
'50671':{'en': 'Claro'},
'50672':{'en': 'Claro'},
'5067300':{'en': 'Claro'},
'5067301':{'en': 'Claro'},
'50683':{'en': 'Kolbi ICE'},
'50684':{'en': 'Kolbi ICE'},
'50685':{'en': 'Kolbi ICE'},
'50686':{'en': 'Kolbi ICE'},
'50687':{'en': 'Kolbi ICE'},
'50688':{'en': 'Kolbi ICE'},
'50689':{'en': 'Kolbi ICE'},
'507111':{'en': 'Claro'},
'507161':{'en': 'Cable & Wireless'},
'507218':{'en': u('Telef\u00f3nica M\u00f3viles')},
'507219':{'en': u('Telef\u00f3nica M\u00f3viles')},
'50760':{'en': 'Digicel'},
'50761':{'en': 'Digicel'},
'507616':{'en': u('Telef\u00f3nica M\u00f3viles')},
'50762':{'en': 'Claro'},
'507630':{'en': 'Claro'},
'507631':{'en': 'Claro'},
'507632':{'en': 'Claro'},
'507633':{'en': 'Cable & Wireless'},
'507634':{'en': 'Cable & Wireless'},
'507635':{'en': u('Telef\u00f3nica M\u00f3viles')},
'507636':{'en': u('Telef\u00f3nica M\u00f3viles')},
'507637':{'en': 'Cable & Wireless'},
'507638':{'en': u('Telef\u00f3nica M\u00f3viles')},
'507639':{'en': u('Telef\u00f3nica M\u00f3viles')},
'50764':{'en': u('Telef\u00f3nica M\u00f3viles')},
'50765':{'en': 'Cable & Wireless'},
'507656':{'en': u('Telef\u00f3nica M\u00f3viles')},
'507657':{'en': u('Telef\u00f3nica M\u00f3viles')},
'507658':{'en': u('Telef\u00f3nica M\u00f3viles')},
'507659':{'en': u('Telef\u00f3nica M\u00f3viles')},
'507660':{'en': u('Telef\u00f3nica M\u00f3viles')},
'507661':{'en': u('Telef\u00f3nica M\u00f3viles')},
'507662':{'en': u('Telef\u00f3nica M\u00f3viles')},
'507663':{'en': u('Telef\u00f3nica M\u00f3viles')},
'507664':{'en': u('Telef\u00f3nica M\u00f3viles')},
'507665':{'en': 'Cable & Wireless'},
'507666':{'en': 'Cable & Wireless'},
'507667':{'en': 'Cable & Wireless'},
'507668':{'en': 'Cable & Wireless'},
'507669':{'en': 'Cable & Wireless'},
'50767':{'en': 'Cable & Wireless'},
'50768':{'en': u('Telef\u00f3nica M\u00f3viles')},
'507680':{'en': 'Cable & Wireless'},
'507684':{'en': 'Cable & Wireless'},
'507687':{'en': 'Cable & Wireless'},
'507688':{'en': 'Cable & Wireless'},
'50769':{'en': 'Cable & Wireless'},
'507692':{'en': u('Telef\u00f3nica M\u00f3viles')},
'507693':{'en': u('Telef\u00f3nica M\u00f3viles')},
'507697':{'en': u('Telef\u00f3nica M\u00f3viles')},
'50781':{'en': 'Mobilphone'},
'507872':{'en': 'Cable & Wireless'},
'507873':{'en': 'Cable & Wireless'},
'50840':{'en': 'Globaltel'},
'50842':{'en': 'Orange'},
'50843':{'en': 'Diabolocom'},
'50844':{'en': 'Globaltel'},
'50850':{'en': 'Keyyo'},
'50855':{'en': 'SPM Telecom'},
'50930':{'en': 'Digicel'},
'50931':{'en': 'Digicel'},
'50934':{'en': 'Digicel'},
'50936':{'en': 'Digicel'},
'50937':{'en': 'Digicel'},
'50938':{'en': 'Digicel'},
'50939':{'en': 'Digicel'},
'50940':{'en': 'Natcom'},
'50941':{'en': 'Natcom'},
'50942':{'en': 'Natcom'},
'50943':{'en': 'Natcom'},
'50944':{'en': 'Digicel'},
'50946':{'en': 'Digicel'},
'50947':{'en': 'Digicel'},
'50948':{'en': 'Digicel'},
'50949':{'en': 'Digicel'},
'51900':{'en': 'Claro'},
'51901':{'en': 'Claro'},
'51910':{'en': 'Claro'},
'51912':{'en': 'Entel'},
'51913':{'en': 'Claro'},
'51914':{'en': 'Claro'},
'51915':{'en': 'Claro'},
'51916':{'en': 'Claro'},
'51917':{'en': 'Claro'},
'51918':{'en': 'Claro'},
'519190':{'en': 'Claro'},
'519191':{'en': 'Claro'},
'5191920':{'en': 'Claro'},
'5191921':{'en': 'Claro'},
'5191922':{'en': 'Claro'},
'5191923':{'en': 'Claro'},
'5191924':{'en': 'Claro'},
'5191925':{'en': 'Claro'},
'5191926':{'en': 'Claro'},
'5191927':{'en': 'Claro'},
'51920':{'en': 'Movistar'},
'51921':{'en': 'Claro'},
'51922':{'en': 'Entel'},
'51923':{'en': 'Entel'},
'51924':{'en': 'Entel'},
'51925':{'en': 'Claro'},
'519260':{'en': 'Claro'},
'519261':{'en': 'Claro'},
'519262':{'en': 'Claro'},
'5192630':{'en': 'Claro'},
'5192631':{'en': 'Claro'},
'5192632':{'en': 'Claro'},
'5192633':{'en': 'Claro'},
'5192634':{'en': 'Claro'},
'5192635':{'en': 'Claro'},
'5192638':{'en': 'Entel'},
'5192639':{'en': 'Entel'},
'519264':{'en': 'Claro'},
'519265':{'en': 'Claro'},
'519266':{'en': 'Entel'},
'519267':{'en': 'Entel'},
'519268':{'en': 'Entel'},
'519269':{'en': 'Entel'},
'51927':{'en': 'Claro'},
'51928':{'en': 'Claro'},
'51929':{'en': 'Claro'},
'51930':{'en': 'Claro'},
'51931':{'en': 'Claro'},
'51932':{'en': 'Claro'},
'519327':{'en': 'Movistar'},
'519328':{'en': 'Movistar'},
'519329':{'en': 'Movistar'},
'51933':{'en': 'Entel'},
'51934':{'en': 'Entel'},
'51935':{'en': 'Claro'},
'51936':{'en': 'Entel'},
'51937':{'en': 'Movistar'},
'519370':{'en': 'Entel'},
'519371':{'en': 'Entel'},
'519372':{'en': 'Entel'},
'519373':{'en': 'Claro'},
'5193730':{'en': 'Entel'},
'5193731':{'en': 'Entel'},
'5193732':{'en': 'Entel'},
'5193733':{'en': 'Entel'},
'51938':{'en': 'Movistar'},
'51939':{'en': 'Movistar'},
'51940':{'en': 'Claro'},
'51941':{'en': 'Claro'},
'519418':{'en': 'Movistar'},
'519419':{'en': 'Movistar'},
'51942':{'en': 'Movistar'},
'519422':{'en': 'Claro'},
'519423':{'en': 'Claro'},
'519427':{'en': 'Claro'},
'51943':{'en': 'Movistar'},
'519433':{'en': 'Claro'},
'519435':{'en': 'Claro'},
'519437':{'en': 'Claro'},
'51944':{'en': 'Claro'},
'519444':{'en': 'Movistar'},
'519446':{'en': 'Movistar'},
'519448':{'en': 'Movistar'},
'519449':{'en': 'Movistar'},
'51945':{'en': 'Movistar'},
'51946':{'en': 'Entel'},
'519466':{'en': 'Claro'},
'519467':{'en': 'Claro'},
'519468':{'en': 'Claro'},
'5194680':{'en': 'Movistar'},
'5194681':{'en': 'Movistar'},
'5194682':{'en': 'Movistar'},
'5194683':{'en': 'Movistar'},
'519469':{'en': 'Movistar'},
'51947':{'en': 'Movistar'},
'519471':{'en': 'Entel'},
'519472':{'en': 'Entel'},
'519473':{'en': 'Entel'},
'519477':{'en': 'Claro'},
'51948':{'en': 'Movistar'},
'5194805':{'en': 'Claro'},
'5194806':{'en': 'Claro'},
'5194807':{'en': 'Claro'},
'5194808':{'en': 'Claro'},
'5194809':{'en': 'Claro'},
'519482':{'en': 'Claro'},
'519483':{'en': 'Claro'},
'519487':{'en': 'Claro'},
'519490':{'en': 'Movistar'},
'5194907':{'en': 'Claro'},
'5194908':{'en': 'Claro'},
'5194909':{'en': 'Claro'},
'519491':{'en': 'Claro'},
'519492':{'en': 'Claro'},
'519493':{'en': 'Claro'},
'519494':{'en': 'Movistar'},
'519495':{'en': 'Movistar'},
'519496':{'en': 'Movistar'},
'519497':{'en': 'Claro'},
'5194978':{'en': 'Movistar'},
'5194979':{'en': 'Movistar'},
'519498':{'en': 'Movistar'},
'5194990':{'en': 'Movistar'},
'5194991':{'en': 'Movistar'},
'5194992':{'en': 'Movistar'},
'5194993':{'en': 'Movistar'},
'5194994':{'en': 'Movistar'},
'5194995':{'en': 'Movistar'},
'5194996':{'en': 'Movistar'},
'5194997':{'en': 'Movistar'},
'51949980':{'en': 'Movistar'},
'51949981':{'en': 'Movistar'},
'519499822':{'en': 'Movistar'},
'519499823':{'en': 'Movistar'},
'519499824':{'en': 'Movistar'},
'519499825':{'en': 'Movistar'},
'519499826':{'en': 'Movistar'},
'519499827':{'en': 'Movistar'},
'519499828':{'en': 'Movistar'},
'519499829':{'en': 'Movistar'},
'51949983':{'en': 'Movistar'},
'51949984':{'en': 'Movistar'},
'51949985':{'en': 'Movistar'},
'51949986':{'en': 'Movistar'},
'519499875':{'en': 'Movistar'},
'519499876':{'en': 'Movistar'},
'519499877':{'en': 'Movistar'},
'519499878':{'en': 'Movistar'},
'519499879':{'en': 'Movistar'},
'5194999':{'en': 'Movistar'},
'5195':{'en': 'Movistar'},
'519501':{'en': 'Claro'},
'5195010':{'en': 'Entel'},
'519502':{'en': 'Claro'},
'519503':{'en': 'Claro'},
'519507':{'en': 'Claro'},
'519511':{'en': 'Claro'},
'519512':{'en': 'Claro'},
'519513':{'en': 'Claro'},
'519517':{'en': 'Claro'},
'519521':{'en': 'Claro'},
'5195210':{'en': 'Entel'},
'519523':{'en': 'Claro'},
'519524':{'en': 'Claro'},
'5195270':{'en': 'Claro'},
'5195271':{'en': 'Claro'},
'5195272':{'en': 'Claro'},
'51953':{'en': 'Claro'},
'5195310':{'en': 'Entel'},
'519541':{'en': 'Claro'},
'5195420':{'en': 'Claro'},
'5195430':{'en': 'Claro'},
'519547':{'en': 'Claro'},
'51955':{'en': 'Entel'},
'519557':{'en': 'Claro'},
'519562':{'en': 'Claro'},
'519563':{'en': 'Claro'},
'519567':{'en': 'Claro'},
'519570':{'en': 'Claro'},
'519571':{'en': 'Claro'},
'519572':{'en': 'Claro'},
'519573':{'en': 'Claro'},
'519577':{'en': 'Claro'},
'5195805':{'en': 'Claro'},
'5195806':{'en': 'Claro'},
'5195807':{'en': 'Claro'},
'5195808':{'en': 'Claro'},
'5195809':{'en': 'Claro'},
'519581':{'en': 'Claro'},
'519582':{'en': 'Claro'},
'519583':{'en': 'Claro'},
'5195847':{'en': 'Claro'},
'5195848':{'en': 'Claro'},
'5195849':{'en': 'Claro'},
'519587':{'en': 'Claro'},
'5195895':{'en': 'Claro'},
'5195896':{'en': 'Claro'},
'5195897':{'en': 'Claro'},
'5195898':{'en': 'Claro'},
'5195899':{'en': 'Claro'},
'519591':{'en': 'Claro'},
'519592':{'en': 'Claro'},
'519593':{'en': 'Claro'},
'519597':{'en': 'Claro'},
'5196004':{'en': 'Claro'},
'5196005':{'en': 'Claro'},
'5196006':{'en': 'Claro'},
'5196007':{'en': 'Claro'},
'5196008':{'en': 'Claro'},
'5196009':{'en': 'Claro'},
'519601':{'en': 'Entel'},
'519602':{'en': 'Entel'},
'519603':{'en': 'Entel'},
'519604':{'en': 'Entel'},
'519605':{'en': 'Entel'},
'519606':{'en': 'Entel'},
'519607':{'en': 'Entel'},
'519608':{'en': 'Entel'},
'519609':{'en': 'Entel'},
'519610':{'en': 'Movistar'},
'519611':{'en': 'Movistar'},
'519612':{'en': 'Claro'},
'519613':{'en': 'Claro'},
'519614':{'en': 'Claro'},
'519615':{'en': 'Movistar'},
'519616':{'en': 'Movistar'},
'519617':{'en': 'Claro'},
'519618':{'en': 'Claro'},
'519619':{'en': 'Movistar'},
'51962':{'en': 'Movistar'},
'519622':{'en': 'Claro'},
'519623':{'en': 'Claro'},
'519627':{'en': 'Claro'},
'51963':{'en': 'Claro'},
'5196350':{'en': 'Movistar'},
'5196351':{'en': 'Movistar'},
'5196352':{'en': 'Movistar'},
'5196353':{'en': 'Movistar'},
'5196354':{'en': 'Movistar'},
'519636':{'en': 'Movistar'},
'519639':{'en': 'Movistar'},
'5196396':{'en': 'Entel'},
'5196397':{'en': 'Entel'},
'51964':{'en': 'Movistar'},
'519641':{'en': 'Claro'},
'519642':{'en': 'Claro'},
'519643':{'en': 'Claro'},
'51965':{'en': 'Claro'},
'519650':{'en': 'Movistar'},
'519656':{'en': 'Movistar'},
'519658':{'en': 'Movistar'},
'519659':{'en': 'Movistar'},
'51966':{'en': 'Movistar'},
'519663':{'en': 'Claro'},
'519664':{'en': 'Claro'},
'519667':{'en': 'Claro'},
'51967':{'en': 'Claro'},
'5196765':{'en': 'Movistar'},
'5196766':{'en': 'Movistar'},
'5196768':{'en': 'Movistar'},
'5196769':{'en': 'Movistar'},
'5196790':{'en': 'Movistar'},
'5196791':{'en': 'Movistar'},
'5196798':{'en': 'Movistar'},
'5196799':{'en': 'Movistar'},
'51968':{'en': 'Movistar'},
'5196820':{'en': 'Entel'},
'5196821':{'en': 'Claro'},
'519683':{'en': 'Claro'},
'519687':{'en': 'Claro'},
'51969':{'en': 'Movistar'},
'519693':{'en': 'Claro'},
'519697':{'en': 'Claro'},
'51970':{'en': 'Entel'},
'519700':{'en': 'Movistar'},
'519702':{'en': 'Claro'},
'519709':{'en': 'Movistar'},
'51971':{'en': 'Movistar'},
'519720':{'en': 'Entel'},
'519721':{'en': 'Entel'},
'519722':{'en': 'Claro'},
'519723':{'en': 'Claro'},
'519724':{'en': 'Claro'},
'519725':{'en': 'Claro'},
'5197250':{'en': 'Movistar'},
'5197251':{'en': 'Movistar'},
'5197252':{'en': 'Movistar'},
'519726':{'en': 'Movistar'},
'519727':{'en': 'Claro'},
'519728':{'en': 'Movistar'},
'519729':{'en': 'Movistar'},
'51973':{'en': 'Claro'},
'519738':{'en': 'Movistar'},
'519739':{'en': 'Movistar'},
'51974':{'en': 'Claro'},
'519740':{'en': 'Movistar'},
'519741':{'en': 'Movistar'},
'5197410':{'en': 'Entel'},
'5197487':{'en': 'Movistar'},
'5197488':{'en': 'Movistar'},
'5197489':{'en': 'Movistar'},
'519749':{'en': 'Movistar'},
'51975':{'en': 'Movistar'},
'519760':{'en': 'Movistar'},
'519761':{'en': 'Movistar'},
'519762':{'en': 'Claro'},
'519763':{'en': 'Claro'},
'519766':{'en': 'Movistar'},
'519767':{'en': 'Movistar'},
'519768':{'en': 'Movistar'},
'519769':{'en': 'Movistar'},
'51977':{'en': 'Entel'},
'519770':{'en': 'Claro'},
'519771':{'en': 'Claro'},
'519772':{'en': 'Movistar'},
'51978':{'en': 'Movistar'},
'5197820':{'en': 'Claro'},
'5197821':{'en': 'Entel'},
'519783':{'en': 'Claro'},
'519786':{'en': 'Claro'},
'519787':{'en': 'Claro'},
'51979':{'en': 'Movistar'},
'519793':{'en': 'Claro'},
'519797':{'en': 'Claro'},
'5198':{'en': 'Claro'},
'519800':{'en': 'Movistar'},
'5198000':{'en': 'Entel'},
'5198001':{'en': 'Entel'},
'5198002':{'en': 'Entel'},
'519801':{'en': 'Movistar'},
'519802':{'en': 'Movistar'},
'519803':{'en': 'Movistar'},
'51981':{'en': 'Entel'},
'519816':{'en': 'Movistar'},
'519817':{'en': 'Movistar'},
'519818':{'en': 'Movistar'},
'519819':{'en': 'Movistar'},
'5198260':{'en': 'Movistar'},
'5198261':{'en': 'Movistar'},
'5198268':{'en': 'Movistar'},
'5198298':{'en': 'Movistar'},
'519834':{'en': 'Entel'},
'519835':{'en': 'Entel'},
'519836':{'en': 'Movistar'},
'519839':{'en': 'Movistar'},
'519840':{'en': 'Movistar'},
'519845':{'en': 'Movistar'},
'519846':{'en': 'Movistar'},
'519848':{'en': 'Movistar'},
'519849':{'en': 'Movistar'},
'51985':{'en': 'Movistar'},
'51988':{'en': 'Movistar'},
'51990':{'en': 'Movistar'},
'51991':{'en': 'Claro'},
'51992':{'en': 'Claro'},
'51993':{'en': 'Claro'},
'519940':{'en': 'Entel'},
'519941':{'en': 'Entel'},
'519942':{'en': 'Entel'},
'519943':{'en': 'Claro'},
'519944':{'en': 'Movistar'},
'519945':{'en': 'Movistar'},
'519946':{'en': 'Claro'},
'519947':{'en': 'Claro'},
'519948':{'en': 'Claro'},
'519949':{'en': 'Claro'},
'51995':{'en': 'Movistar'},
'51996':{'en': 'Movistar'},
'51997':{'en': 'Claro'},
'51998':{'en': 'Movistar'},
'519981':{'en': 'Entel'},
'519982':{'en': 'Entel'},
'519983':{'en': 'Entel'},
'51999':{'en': 'Movistar'},
'535':{'en': 'etecsa'},
'549113':{'en': 'Personal'},
'549114':{'en': 'Personal'},
'549115':{'en': 'Personal'},
'549116':{'en': 'Personal'},
'549220':{'en': 'Personal'},
'549221':{'en': 'Personal'},
'549222':{'en': 'Personal'},
'549223':{'en': 'Personal'},
'549224':{'en': 'Personal'},
'549225':{'en': 'Personal'},
'549226':{'en': 'Personal'},
'549227':{'en': 'Personal'},
'549228':{'en': 'Personal'},
'549229':{'en': 'Personal'},
'549230':{'en': 'Personal'},
'549231':{'en': 'Personal'},
'549232':{'en': 'Personal'},
'549233':{'en': 'Personal'},
'549234':{'en': 'Personal'},
'549235':{'en': 'Personal'},
'549236':{'en': 'Personal'},
'549239':{'en': 'Personal'},
'549247':{'en': 'Personal'},
'549249':{'en': 'Personal'},
'549260':{'en': 'Personal'},
'549261':{'en': 'Personal'},
'549262':{'en': 'Personal'},
'549263':{'en': 'Personal'},
'549264':{'en': 'Personal'},
'549265':{'en': 'Personal'},
'549266':{'en': 'Personal'},
'549280':{'en': 'Personal'},
'549290':{'en': 'Personal'},
'549291':{'en': 'Personal'},
'549292':{'en': 'Personal'},
'549293':{'en': 'Personal'},
'549294':{'en': 'Personal'},
'549295':{'en': 'Personal'},
'549296':{'en': 'Personal'},
'549297':{'en': 'Personal'},
'549298':{'en': 'Personal'},
'549299':{'en': 'Personal'},
'549332':{'en': 'Personal'},
'549336':{'en': 'Personal'},
'549338':{'en': 'Personal'},
'549340':{'en': 'Personal'},
'549341':{'en': 'Personal'},
'549342':{'en': 'Personal'},
'549343':{'en': 'Personal'},
'549344':{'en': 'Personal'},
'549345':{'en': 'Personal'},
'549346':{'en': 'Personal'},
'549347':{'en': 'Personal'},
'549348':{'en': 'Personal'},
'549349':{'en': 'Personal'},
'549351':{'en': 'Personal'},
'549352':{'en': 'Personal'},
'549353':{'en': 'Personal'},
'549354':{'en': 'Personal'},
'549356':{'en': 'Personal'},
'549357':{'en': 'Personal'},
'549358':{'en': 'Personal'},
'549362':{'en': 'Personal'},
'549364':{'en': 'Personal'},
'549370':{'en': 'Personal'},
'549371':{'en': 'Personal'},
'549372':{'en': 'Personal'},
'549373':{'en': 'Personal'},
'549374':{'en': 'Personal'},
'549375':{'en': 'Personal'},
'549376':{'en': 'Personal'},
'549377':{'en': 'Personal'},
'549378':{'en': 'Personal'},
'549379':{'en': 'Personal'},
'549380':{'en': 'Personal'},
'549381':{'en': 'Personal'},
'549382':{'en': 'Personal'},
'549383':{'en': 'Personal'},
'549384':{'en': 'Personal'},
'549385':{'en': 'Personal'},
'549386':{'en': 'Personal'},
'549387':{'en': 'Personal'},
'549388':{'en': 'Personal'},
'549389':{'en': 'Personal'},
'551195472':{'en': 'Vivo'},
'551195473':{'en': 'Vivo'},
'551195474':{'en': 'Vivo'},
'551195769':{'en': 'Vivo'},
'55119577':{'en': 'Vivo'},
'551195780':{'en': 'Vivo'},
'551195781':{'en': 'Vivo'},
'551195782':{'en': 'Vivo'},
'551195783':{'en': 'Vivo'},
'551195784':{'en': 'Vivo'},
'551195785':{'en': 'Vivo'},
'551195786':{'en': 'Vivo'},
'551196057':{'en': 'Vivo'},
'551196058':{'en': 'Vivo'},
'551196059':{'en': 'Vivo'},
'551196060':{'en': 'Vivo'},
'551196168':{'en': 'Claro BR'},
'551196169':{'en': 'Claro BR'},
'55119617':{'en': 'Claro BR'},
'55119618':{'en': 'Vivo'},
'551196180':{'en': 'Claro BR'},
'551196181':{'en': 'Claro BR'},
'55119619':{'en': 'Vivo'},
'55119630':{'en': 'Claro BR'},
'55119631':{'en': 'Claro BR'},
'55119632':{'en': 'Claro BR'},
'55119633':{'en': 'Claro BR'},
'55119637':{'en': 'Vivo'},
'55119638':{'en': 'Vivo'},
'55119639':{'en': 'Vivo'},
'55119640':{'en': 'Vivo'},
'55119641':{'en': 'Vivo'},
'55119647':{'en': 'Vivo'},
'55119648':{'en': 'Vivo'},
'55119649':{'en': 'Vivo'},
'55119657':{'en': 'Claro BR'},
'55119658':{'en': 'Claro BR'},
'55119659':{'en': 'Claro BR'},
'55119660':{'en': 'Claro BR'},
'55119661':{'en': 'Claro BR'},
'55119662':{'en': 'Claro BR'},
'55119663':{'en': 'Claro BR'},
'55119664':{'en': 'Claro BR'},
'551196650':{'en': 'Claro BR'},
'55119684':{'en': 'Vivo'},
'55119685':{'en': 'Vivo'},
'551196860':{'en': 'Vivo'},
'551196861':{'en': 'Vivo'},
'551196862':{'en': 'Vivo'},
'551196863':{'en': 'Vivo'},
'551196864':{'en': 'Vivo'},
'551196865':{'en': 'Vivo'},
'551196866':{'en': 'Vivo'},
'55119690':{'en': 'Vivo'},
'55119691':{'en': 'Claro BR'},
'551196910':{'en': 'Vivo'},
'551196911':{'en': 'Vivo'},
'551196912':{'en': 'Vivo'},
'551196913':{'en': 'Vivo'},
'55119692':{'en': 'Claro BR'},
'551196930':{'en': 'Claro BR'},
'551196931':{'en': 'Claro BR'},
'551197011':{'en': 'TIM'},
'551197012':{'en': 'TIM'},
'551197013':{'en': 'TIM'},
'551197014':{'en': 'TIM'},
'551197015':{'en': 'TIM'},
'551197016':{'en': 'TIM'},
'551197017':{'en': 'TIM'},
'551197018':{'en': 'TIM'},
'551197019':{'en': 'TIM'},
'55119702':{'en': 'TIM'},
'551197030':{'en': 'TIM'},
'551197031':{'en': 'TIM'},
'551197032':{'en': 'TIM'},
'551197033':{'en': 'TIM'},
'551197034':{'en': 'TIM'},
'551197035':{'en': 'TIM'},
'551197036':{'en': 'TIM'},
'551197037':{'en': 'TIM'},
'551197038':{'en': 'TIM'},
'551197049':{'en': 'TIM'},
'55119705':{'en': 'Claro BR'},
'551197050':{'en': 'TIM'},
'551197051':{'en': 'TIM'},
'55119706':{'en': 'Claro BR'},
'55119707':{'en': 'Claro BR'},
'55119708':{'en': 'Claro BR'},
'551197087':{'en': 'Vivo'},
'551197088':{'en': 'Vivo'},
'551197089':{'en': 'Vivo'},
'55119709':{'en': 'Vivo'},
'5511971':{'en': 'Vivo'},
'5511972':{'en': 'Vivo'},
'5511973':{'en': 'Vivo'},
'5511974':{'en': 'Vivo'},
'5511975':{'en': 'Vivo'},
'5511976':{'en': 'Claro BR'},
'551197968':{'en': 'Claro BR'},
'551197969':{'en': 'Claro BR'},
'55119797':{'en': 'Oi'},
'551197970':{'en': 'Claro BR'},
'55119798':{'en': 'Oi'},
'551197990':{'en': 'Oi'},
'551197991':{'en': 'Oi'},
'551197992':{'en': 'Oi'},
'551197993':{'en': 'Oi'},
'551197994':{'en': 'Oi'},
'551197995':{'en': 'Oi'},
'551198023':{'en': 'Oi'},
'551198024':{'en': 'Oi'},
'551198025':{'en': 'Oi'},
'551198026':{'en': 'Oi'},
'551198027':{'en': 'Oi'},
'551198028':{'en': 'Oi'},
'551198029':{'en': 'Oi'},
'55119803':{'en': 'Oi'},
'55119804':{'en': 'Oi'},
'55119805':{'en': 'Oi'},
'55119806':{'en': 'Oi'},
'55119807':{'en': 'Oi'},
'55119808':{'en': 'Oi'},
'55119809':{'en': 'Oi'},
'5511981':{'en': 'TIM'},
'5511982':{'en': 'TIM'},
'5511983':{'en': 'TIM'},
'5511984':{'en': 'TIM'},
'5511985':{'en': 'TIM'},
'5511986':{'en': 'TIM'},
'5511987':{'en': 'TIM'},
'5511988':{'en': 'Claro BR'},
'5511989':{'en': 'Claro BR'},
'5511991':{'en': 'Claro BR'},
'5511992':{'en': 'Claro BR'},
'5511993':{'en': 'Claro BR'},
'5511994':{'en': 'Claro BR'},
'5511995':{'en': 'Vivo'},
'5511996':{'en': 'Vivo'},
'5511997':{'en': 'Vivo'},
'5511998':{'en': 'Vivo'},
'5511999':{'en': 'Vivo'},
'551298111':{'en': 'TIM'},
'551298112':{'en': 'TIM'},
'551298113':{'en': 'TIM'},
'551298114':{'en': 'TIM'},
'551298115':{'en': 'TIM'},
'551298116':{'en': 'TIM'},
'551298117':{'en': 'TIM'},
'551298118':{'en': 'TIM'},
'551298119':{'en': 'TIM'},
'551298121':{'en': 'TIM'},
'551298122':{'en': 'TIM'},
'551298123':{'en': 'TIM'},
'551298124':{'en': 'TIM'},
'551298125':{'en': 'TIM'},
'551298126':{'en': 'TIM'},
'551298127':{'en': 'TIM'},
'551298128':{'en': 'TIM'},
'551298129':{'en': 'TIM'},
'551298131':{'en': 'TIM'},
'551298132':{'en': 'TIM'},
'551298133':{'en': 'TIM'},
'551298134':{'en': 'TIM'},
'551298135':{'en': 'TIM'},
'551298136':{'en': 'TIM'},
'551298137':{'en': 'TIM'},
'551298138':{'en': 'TIM'},
'551298139':{'en': 'TIM'},
'551298141':{'en': 'TIM'},
'551298142':{'en': 'TIM'},
'551298143':{'en': 'TIM'},
'551298144':{'en': 'TIM'},
'551298145':{'en': 'TIM'},
'551298146':{'en': 'TIM'},
'551298147':{'en': 'TIM'},
'551298148':{'en': 'TIM'},
'551298149':{'en': 'TIM'},
'551298151':{'en': 'TIM'},
'551298152':{'en': 'TIM'},
'551298153':{'en': 'TIM'},
'551298154':{'en': 'TIM'},
'551298155':{'en': 'TIM'},
'551298156':{'en': 'TIM'},
'551298157':{'en': 'TIM'},
'551298158':{'en': 'TIM'},
'551298159':{'en': 'TIM'},
'551298161':{'en': 'TIM'},
'551298162':{'en': 'TIM'},
'551298163':{'en': 'TIM'},
'551298164':{'en': 'TIM'},
'551298165':{'en': 'TIM'},
'551298166':{'en': 'TIM'},
'551298167':{'en': 'TIM'},
'551298168':{'en': 'TIM'},
'551298169':{'en': 'TIM'},
'551298171':{'en': 'TIM'},
'551298172':{'en': 'TIM'},
'551298173':{'en': 'TIM'},
'551298174':{'en': 'TIM'},
'551298175':{'en': 'TIM'},
'551298176':{'en': 'TIM'},
'551298177':{'en': 'TIM'},
'551298178':{'en': 'TIM'},
'551298179':{'en': 'TIM'},
'551298181':{'en': 'TIM'},
'551298182':{'en': 'TIM'},
'551298808':{'en': 'Oi'},
'551298809':{'en': 'Oi'},
'55129881':{'en': 'Oi'},
'551298820':{'en': 'Oi'},
'551298821':{'en': 'Oi'},
'551298822':{'en': 'Oi'},
'551298823':{'en': 'Oi'},
'5512991':{'en': 'Claro BR'},
'55129920':{'en': 'Claro BR'},
'55129921':{'en': 'Claro BR'},
'55129922':{'en': 'Claro BR'},
'55129923':{'en': 'Claro BR'},
'551299240':{'en': 'Claro BR'},
'551299241':{'en': 'Claro BR'},
'551299242':{'en': 'Claro BR'},
'551299243':{'en': 'Claro BR'},
'551299244':{'en': 'Claro BR'},
'551299245':{'en': 'Claro BR'},
'55129960':{'en': 'Vivo'},
'55129961':{'en': 'Vivo'},
'55129962':{'en': 'Vivo'},
'551299630':{'en': 'Vivo'},
'551299631':{'en': 'Vivo'},
'551299632':{'en': 'Vivo'},
'5512997':{'en': 'Vivo'},
'551398111':{'en': 'TIM'},
'551398112':{'en': 'TIM'},
'551398113':{'en': 'TIM'},
'551398114':{'en': 'TIM'},
'551398115':{'en': 'TIM'},
'551398116':{'en': 'TIM'},
'551398117':{'en': 'TIM'},
'551398118':{'en': 'TIM'},
'551398119':{'en': 'TIM'},
'551398121':{'en': 'TIM'},
'551398122':{'en': 'TIM'},
'551398123':{'en': 'TIM'},
'551398124':{'en': 'TIM'},
'551398125':{'en': 'TIM'},
'551398126':{'en': 'TIM'},
'551398127':{'en': 'TIM'},
'551398128':{'en': 'TIM'},
'551398129':{'en': 'TIM'},
'551398131':{'en': 'TIM'},
'551398132':{'en': 'TIM'},
'551398133':{'en': 'TIM'},
'551398134':{'en': 'TIM'},
'551398135':{'en': 'TIM'},
'551398136':{'en': 'TIM'},
'551398137':{'en': 'TIM'},
'551398138':{'en': 'TIM'},
'551398139':{'en': 'TIM'},
'551398141':{'en': 'TIM'},
'551398142':{'en': 'TIM'},
'551398143':{'en': 'TIM'},
'551398144':{'en': 'TIM'},
'551398145':{'en': 'TIM'},
'551398146':{'en': 'TIM'},
'551398147':{'en': 'TIM'},
'551398149':{'en': 'TIM'},
'551398151':{'en': 'TIM'},
'551398152':{'en': 'TIM'},
'551398153':{'en': 'TIM'},
'551398154':{'en': 'TIM'},
'551398155':{'en': 'TIM'},
'551398156':{'en': 'TIM'},
'551398157':{'en': 'TIM'},
'551398158':{'en': 'TIM'},
'551398159':{'en': 'TIM'},
'551398161':{'en': 'TIM'},
'551398803':{'en': 'Oi'},
'551398804':{'en': 'Oi'},
'551398805':{'en': 'Oi'},
'551398806':{'en': 'Oi'},
'551398807':{'en': 'Oi'},
'551398808':{'en': 'Oi'},
'551398809':{'en': 'Oi'},
'55139881':{'en': 'Oi'},
'551398820':{'en': 'Oi'},
'5513991':{'en': 'Claro BR'},
'55139920':{'en': 'Claro BR'},
'551399210':{'en': 'Claro BR'},
'551399211':{'en': 'Claro BR'},
'55139960':{'en': 'Vivo'},
'55139961':{'en': 'Vivo'},
'55139962':{'en': 'Vivo'},
'551399630':{'en': 'Vivo'},
'551399631':{'en': 'Vivo'},
'551399632':{'en': 'Vivo'},
'551399633':{'en': 'Vivo'},
'551399634':{'en': 'Vivo'},
'551399635':{'en': 'Vivo'},
'551399636':{'en': 'Vivo'},
'551399637':{'en': 'Vivo'},
'5513997':{'en': 'Vivo'},
'551498111':{'en': 'TIM'},
'551498112':{'en': 'TIM'},
'551498113':{'en': 'TIM'},
'551498114':{'en': 'TIM'},
'551498115':{'en': 'TIM'},
'551498116':{'en': 'TIM'},
'551498117':{'en': 'TIM'},
'551498118':{'en': 'TIM'},
'551498119':{'en': 'TIM'},
'551498121':{'en': 'TIM'},
'551498122':{'en': 'TIM'},
'551498123':{'en': 'TIM'},
'551498124':{'en': 'TIM'},
'551498125':{'en': 'TIM'},
'551498126':{'en': 'TIM'},
'551498127':{'en': 'TIM'},
'551498128':{'en': 'TIM'},
'551498129':{'en': 'TIM'},
'551498131':{'en': 'TIM'},
'551498132':{'en': 'TIM'},
'551498133':{'en': 'TIM'},
'551498134':{'en': 'TIM'},
'551498135':{'en': 'TIM'},
'551498136':{'en': 'TIM'},
'551498137':{'en': 'TIM'},
'551498138':{'en': 'TIM'},
'551498139':{'en': 'TIM'},
'551498141':{'en': 'TIM'},
'551498142':{'en': 'TIM'},
'551498143':{'en': 'TIM'},
'551498144':{'en': 'TIM'},
'551498145':{'en': 'TIM'},
'551498146':{'en': 'TIM'},
'551498147':{'en': 'TIM'},
'551498148':{'en': 'TIM'},
'551498149':{'en': 'TIM'},
'551498151':{'en': 'TIM'},
'551498152':{'en': 'TIM'},
'551498153':{'en': 'TIM'},
'551498154':{'en': 'TIM'},
'551498155':{'en': 'TIM'},
'551498156':{'en': 'TIM'},
'551498157':{'en': 'TIM'},
'551498158':{'en': 'TIM'},
'551498159':{'en': 'TIM'},
'551498161':{'en': 'TIM'},
'551498162':{'en': 'TIM'},
'551498163':{'en': 'TIM'},
'551498164':{'en': 'TIM'},
'551498165':{'en': 'TIM'},
'551498166':{'en': 'TIM'},
'551498806':{'en': 'Oi'},
'551498807':{'en': 'Oi'},
'551498808':{'en': 'Oi'},
'551498809':{'en': 'Oi'},
'551498810':{'en': 'Oi'},
'551498811':{'en': 'Oi'},
'551498812':{'en': 'Oi'},
'551498813':{'en': 'Oi'},
'551498814':{'en': 'Oi'},
'551499101':{'en': 'Claro BR'},
'551499102':{'en': 'Claro BR'},
'551499103':{'en': 'Claro BR'},
'551499104':{'en': 'Claro BR'},
'551499105':{'en': 'Claro BR'},
'551499106':{'en': 'Claro BR'},
'551499107':{'en': 'Claro BR'},
'551499108':{'en': 'Claro BR'},
'551499109':{'en': 'Claro BR'},
'551499111':{'en': 'Claro BR'},
'551499112':{'en': 'Claro BR'},
'551499113':{'en': 'Claro BR'},
'551499114':{'en': 'Claro BR'},
'551499115':{'en': 'Claro BR'},
'551499116':{'en': 'Claro BR'},
'551499117':{'en': 'Claro BR'},
'551499118':{'en': 'Claro BR'},
'551499119':{'en': 'Claro BR'},
'551499121':{'en': 'Claro BR'},
'551499122':{'en': 'Claro BR'},
'551499123':{'en': 'Claro BR'},
'551499124':{'en': 'Claro BR'},
'551499125':{'en': 'Claro BR'},
'551499126':{'en': 'Claro BR'},
'551499127':{'en': 'Claro BR'},
'551499128':{'en': 'Claro BR'},
'551499129':{'en': 'Claro BR'},
'551499131':{'en': 'Claro BR'},
'551499132':{'en': 'Claro BR'},
'551499133':{'en': 'Claro BR'},
'551499134':{'en': 'Claro BR'},
'551499135':{'en': 'Claro BR'},
'551499136':{'en': 'Claro BR'},
'551499137':{'en': 'Claro BR'},
'551499138':{'en': 'Claro BR'},
'551499141':{'en': 'Claro BR'},
'551499142':{'en': 'Claro BR'},
'551499143':{'en': 'Claro BR'},
'551499146':{'en': 'Claro BR'},
'551499147':{'en': 'Claro BR'},
'551499148':{'en': 'Claro BR'},
'551499149':{'en': 'Claro BR'},
'551499151':{'en': 'Claro BR'},
'551499152':{'en': 'Claro BR'},
'551499153':{'en': 'Claro BR'},
'551499154':{'en': 'Claro BR'},
'551499155':{'en': 'Claro BR'},
'551499156':{'en': 'Claro BR'},
'551499157':{'en': 'Claro BR'},
'551499161':{'en': 'Claro BR'},
'551499162':{'en': 'Claro BR'},
'551499163':{'en': 'Claro BR'},
'551499164':{'en': 'Claro BR'},
'551499165':{'en': 'Claro BR'},
'551499166':{'en': 'Claro BR'},
'551499167':{'en': 'Claro BR'},
'551499168':{'en': 'Claro BR'},
'551499169':{'en': 'Claro BR'},
'551499171':{'en': 'Claro BR'},
'551499172':{'en': 'Claro BR'},
'551499173':{'en': 'Claro BR'},
'551499174':{'en': 'Claro BR'},
'551499175':{'en': 'Claro BR'},
'551499176':{'en': 'Claro BR'},
'551499177':{'en': 'Claro BR'},
'551499178':{'en': 'Claro BR'},
'551499179':{'en': 'Claro BR'},
'551499181':{'en': 'Claro BR'},
'551499182':{'en': 'Claro BR'},
'551499183':{'en': 'Claro BR'},
'551499184':{'en': 'Claro BR'},
'551499185':{'en': 'Claro BR'},
'551499186':{'en': 'Claro BR'},
'551499187':{'en': 'Claro BR'},
'551499188':{'en': 'Claro BR'},
'551499189':{'en': 'Claro BR'},
'551499191':{'en': 'Claro BR'},
'551499192':{'en': 'Claro BR'},
'551499193':{'en': 'Claro BR'},
'551499194':{'en': 'Claro BR'},
'551499195':{'en': 'Claro BR'},
'551499196':{'en': 'Claro BR'},
'551499197':{'en': 'Claro BR'},
'5514996':{'en': 'Vivo'},
'5514997':{'en': 'Vivo'},
'55149980':{'en': 'Vivo'},
'55149981':{'en': 'Vivo'},
'55149982':{'en': 'Vivo'},
'551499830':{'en': 'Vivo'},
'551499831':{'en': 'Vivo'},
'551499832':{'en': 'Vivo'},
'551598111':{'en': 'TIM'},
'551598112':{'en': 'TIM'},
'551598113':{'en': 'TIM'},
'551598114':{'en': 'TIM'},
'551598115':{'en': 'TIM'},
'551598116':{'en': 'TIM'},
'551598117':{'en': 'TIM'},
'551598118':{'en': 'TIM'},
'551598119':{'en': 'TIM'},
'551598121':{'en': 'TIM'},
'551598122':{'en': 'TIM'},
'551598123':{'en': 'TIM'},
'551598124':{'en': 'TIM'},
'551598125':{'en': 'TIM'},
'551598126':{'en': 'TIM'},
'551598127':{'en': 'TIM'},
'551598128':{'en': 'TIM'},
'551598129':{'en': 'TIM'},
'551598131':{'en': 'TIM'},
'551598132':{'en': 'TIM'},
'551598133':{'en': 'TIM'},
'551598134':{'en': 'TIM'},
'551598135':{'en': 'TIM'},
'551598136':{'en': 'TIM'},
'551598138':{'en': 'TIM'},
'551598139':{'en': 'TIM'},
'551598141':{'en': 'TIM'},
'551598804':{'en': 'Oi'},
'551598805':{'en': 'Oi'},
'551598806':{'en': 'Oi'},
'551598807':{'en': 'Oi'},
'551598808':{'en': 'Oi'},
'551598809':{'en': 'Oi'},
'551598810':{'en': 'Oi'},
'551598813':{'en': 'Oi'},
'551598814':{'en': 'Oi'},
'551598815':{'en': 'Oi'},
'551599101':{'en': 'Claro BR'},
'551599102':{'en': 'Claro BR'},
'551599103':{'en': 'Claro BR'},
'551599104':{'en': 'Claro BR'},
'551599105':{'en': 'Claro BR'},
'551599106':{'en': 'Claro BR'},
'551599107':{'en': 'Claro BR'},
'551599108':{'en': 'Claro BR'},
'551599109':{'en': 'Claro BR'},
'551599111':{'en': 'Claro BR'},
'551599112':{'en': 'Claro BR'},
'551599113':{'en': 'Claro BR'},
'551599114':{'en': 'Claro BR'},
'551599115':{'en': 'Claro BR'},
'551599116':{'en': 'Claro BR'},
'551599117':{'en': 'Claro BR'},
'551599118':{'en': 'Claro BR'},
'551599119':{'en': 'Claro BR'},
'551599121':{'en': 'Claro BR'},
'551599122':{'en': 'Claro BR'},
'551599123':{'en': 'Claro BR'},
'551599124':{'en': 'Claro BR'},
'551599125':{'en': 'Claro BR'},
'551599126':{'en': 'Claro BR'},
'551599127':{'en': 'Claro BR'},
'551599128':{'en': 'Claro BR'},
'551599129':{'en': 'Claro BR'},
'551599131':{'en': 'Claro BR'},
'551599132':{'en': 'Claro BR'},
'551599133':{'en': 'Claro BR'},
'551599134':{'en': 'Claro BR'},
'551599135':{'en': 'Claro BR'},
'551599136':{'en': 'Claro BR'},
'551599137':{'en': 'Claro BR'},
'551599138':{'en': 'Claro BR'},
'551599139':{'en': 'Claro BR'},
'551599141':{'en': 'Claro BR'},
'551599142':{'en': 'Claro BR'},
'551599143':{'en': 'Claro BR'},
'551599144':{'en': 'Claro BR'},
'551599145':{'en': 'Claro BR'},
'551599146':{'en': 'Claro BR'},
'551599147':{'en': 'Claro BR'},
'551599148':{'en': 'Claro BR'},
'551599149':{'en': 'Claro BR'},
'551599151':{'en': 'Claro BR'},
'551599152':{'en': 'Claro BR'},
'551599153':{'en': 'Claro BR'},
'551599154':{'en': 'Claro BR'},
'551599155':{'en': 'Claro BR'},
'551599156':{'en': 'Claro BR'},
'551599157':{'en': 'Claro BR'},
'551599158':{'en': 'Claro BR'},
'551599159':{'en': 'Claro BR'},
'551599161':{'en': 'Claro BR'},
'551599162':{'en': 'Claro BR'},
'551599163':{'en': 'Claro BR'},
'551599164':{'en': 'Claro BR'},
'551599165':{'en': 'Claro BR'},
'551599166':{'en': 'Claro BR'},
'551599167':{'en': 'Claro BR'},
'551599168':{'en': 'Claro BR'},
'551599169':{'en': 'Claro BR'},
'551599171':{'en': 'Claro BR'},
'551599172':{'en': 'Claro BR'},
'551599173':{'en': 'Claro BR'},
'551599174':{'en': 'Claro BR'},
'551599175':{'en': 'Claro BR'},
'551599176':{'en': 'Claro BR'},
'551599177':{'en': 'Claro BR'},
'551599178':{'en': 'Claro BR'},
'551599179':{'en': 'Claro BR'},
'551599181':{'en': 'Claro BR'},
'551599182':{'en': 'Claro BR'},
'551599183':{'en': 'Claro BR'},
'551599184':{'en': 'Claro BR'},
'551599185':{'en': 'Claro BR'},
'551599186':{'en': 'Claro BR'},
'551599187':{'en': 'Claro BR'},
'551599188':{'en': 'Claro BR'},
'551599201':{'en': 'Claro BR'},
'55159960':{'en': 'Vivo'},
'55159961':{'en': 'Vivo'},
'55159962':{'en': 'Vivo'},
'55159963':{'en': 'Vivo'},
'55159964':{'en': 'Vivo'},
'55159965':{'en': 'Vivo'},
'55159966':{'en': 'Vivo'},
'55159967':{'en': 'Vivo'},
'55159968':{'en': 'Vivo'},
'551599690':{'en': 'Vivo'},
'551599691':{'en': 'Vivo'},
'551599692':{'en': 'Vivo'},
'551599693':{'en': 'Vivo'},
'551599694':{'en': 'Vivo'},
'551599695':{'en': 'Vivo'},
'551599696':{'en': 'Vivo'},
'551599697':{'en': 'Vivo'},
'5515997':{'en': 'Vivo'},
'551698111':{'en': 'TIM'},
'551698112':{'en': 'TIM'},
'551698113':{'en': 'TIM'},
'551698114':{'en': 'TIM'},
'551698115':{'en': 'TIM'},
'551698116':{'en': 'TIM'},
'551698117':{'en': 'TIM'},
'551698118':{'en': 'TIM'},
'551698119':{'en': 'TIM'},
'551698121':{'en': 'TIM'},
'551698122':{'en': 'TIM'},
'551698123':{'en': 'TIM'},
'551698124':{'en': 'TIM'},
'551698125':{'en': 'TIM'},
'551698126':{'en': 'TIM'},
'551698127':{'en': 'TIM'},
'551698128':{'en': 'TIM'},
'551698129':{'en': 'TIM'},
'551698131':{'en': 'TIM'},
'551698132':{'en': 'TIM'},
'551698133':{'en': 'TIM'},
'551698134':{'en': 'TIM'},
'551698135':{'en': 'TIM'},
'551698136':{'en': 'TIM'},
'551698137':{'en': 'TIM'},
'551698138':{'en': 'TIM'},
'551698139':{'en': 'TIM'},
'551698141':{'en': 'TIM'},
'551698142':{'en': 'TIM'},
'551698143':{'en': 'TIM'},
'551698144':{'en': 'TIM'},
'551698145':{'en': 'TIM'},
'551698146':{'en': 'TIM'},
'551698147':{'en': 'TIM'},
'551698148':{'en': 'TIM'},
'551698149':{'en': 'TIM'},
'551698151':{'en': 'TIM'},
'551698152':{'en': 'TIM'},
'551698153':{'en': 'TIM'},
'551698154':{'en': 'TIM'},
'551698155':{'en': 'TIM'},
'551698156':{'en': 'TIM'},
'551698157':{'en': 'TIM'},
'551698158':{'en': 'TIM'},
'551698159':{'en': 'TIM'},
'551698161':{'en': 'TIM'},
'551698162':{'en': 'TIM'},
'551698163':{'en': 'TIM'},
'551698164':{'en': 'TIM'},
'551698165':{'en': 'TIM'},
'551698166':{'en': 'TIM'},
'551698167':{'en': 'TIM'},
'551698168':{'en': 'TIM'},
'551698169':{'en': 'TIM'},
'551698171':{'en': 'TIM'},
'551698172':{'en': 'TIM'},
'551698173':{'en': 'TIM'},
'551698174':{'en': 'TIM'},
'551698175':{'en': 'TIM'},
'551698176':{'en': 'TIM'},
'551698177':{'en': 'TIM'},
'551698178':{'en': 'TIM'},
'551698179':{'en': 'TIM'},
'551698181':{'en': 'TIM'},
'551698182':{'en': 'TIM'},
'551698183':{'en': 'TIM'},
'551698184':{'en': 'TIM'},
'551698803':{'en': 'Oi'},
'551698804':{'en': 'Oi'},
'551698805':{'en': 'Oi'},
'551698806':{'en': 'Oi'},
'551698807':{'en': 'Oi'},
'551698808':{'en': 'Oi'},
'551698809':{'en': 'Oi'},
'55169881':{'en': 'Oi'},
'551698820':{'en': 'Oi'},
'551698821':{'en': 'Oi'},
'551698822':{'en': 'Oi'},
'551698823':{'en': 'Oi'},
'5516991':{'en': 'Claro BR'},
'5516992':{'en': 'Claro BR'},
'55169930':{'en': 'Claro BR'},
'55169931':{'en': 'Claro BR'},
'55169932':{'en': 'Claro BR'},
'55169933':{'en': 'Claro BR'},
'55169934':{'en': 'Claro BR'},
'55169935':{'en': 'Claro BR'},
'551699360':{'en': 'Claro BR'},
'551699361':{'en': 'Claro BR'},
'551699362':{'en': 'Claro BR'},
'551699363':{'en': 'Claro BR'},
'551699364':{'en': 'Claro BR'},
'551699601':{'en': 'Vivo'},
'551699606':{'en': 'Vivo'},
'551699607':{'en': 'Vivo'},
'551699608':{'en': 'Vivo'},
'551699609':{'en': 'Vivo'},
'551699701':{'en': 'Vivo'},
'551699702':{'en': 'Vivo'},
'551699703':{'en': 'Vivo'},
'551699704':{'en': 'Vivo'},
'551699705':{'en': 'Vivo'},
'551699706':{'en': 'Vivo'},
'551699707':{'en': 'Vivo'},
'551699708':{'en': 'Vivo'},
'551699709':{'en': 'Vivo'},
'551699711':{'en': 'Vivo'},
'551699712':{'en': 'Vivo'},
'551699713':{'en': 'Vivo'},
'551699714':{'en': 'Vivo'},
'551699715':{'en': 'Vivo'},
'551699716':{'en': 'Vivo'},
'551699717':{'en': 'Vivo'},
'551699718':{'en': 'Vivo'},
'551699719':{'en': 'Vivo'},
'551699721':{'en': 'Vivo'},
'551699722':{'en': 'Vivo'},
'551699723':{'en': 'Vivo'},
'551699724':{'en': 'Vivo'},
'551699725':{'en': 'Vivo'},
'551699726':{'en': 'Vivo'},
'551699727':{'en': 'Vivo'},
'551699728':{'en': 'Vivo'},
'551699729':{'en': 'Vivo'},
'551699731':{'en': 'Vivo'},
'551699732':{'en': 'Vivo'},
'551699733':{'en': 'Vivo'},
'551699734':{'en': 'Vivo'},
'551699735':{'en': 'Vivo'},
'551699736':{'en': 'Vivo'},
'551699737':{'en': 'Vivo'},
'551699738':{'en': 'Vivo'},
'551699739':{'en': 'Vivo'},
'551699741':{'en': 'Vivo'},
'551699742':{'en': 'Vivo'},
'551699743':{'en': 'Vivo'},
'551699744':{'en': 'Vivo'},
'551699745':{'en': 'Vivo'},
'551699746':{'en': 'Vivo'},
'551699747':{'en': 'Vivo'},
'551699748':{'en': 'Vivo'},
'551699749':{'en': 'Vivo'},
'551699751':{'en': 'Vivo'},
'551699752':{'en': 'Vivo'},
'551699753':{'en': 'Vivo'},
'551699754':{'en': 'Vivo'},
'551699755':{'en': 'Vivo'},
'551699756':{'en': 'Vivo'},
'551699757':{'en': 'Vivo'},
'551699758':{'en': 'Vivo'},
'551699759':{'en': 'Vivo'},
'551699761':{'en': 'Vivo'},
'551699762':{'en': 'Vivo'},
'551699763':{'en': 'Vivo'},
'551699764':{'en': 'Vivo'},
'551699765':{'en': 'Vivo'},
'551699766':{'en': 'Vivo'},
'551699767':{'en': 'Vivo'},
'551699768':{'en': 'Vivo'},
'551699769':{'en': 'Vivo'},
'551699770':{'en': 'Vivo'},
'551699771':{'en': 'Vivo'},
'551699772':{'en': 'Vivo'},
'551699773':{'en': 'Vivo'},
'551699774':{'en': 'Vivo'},
'551699775':{'en': 'Vivo'},
'551699776':{'en': 'Vivo'},
'551699777':{'en': 'Vivo'},
'551699778':{'en': 'Vivo'},
'551699780':{'en': 'Vivo'},
'551699781':{'en': 'Vivo'},
'551699782':{'en': 'Vivo'},
'551699783':{'en': 'Vivo'},
'551699784':{'en': 'Vivo'},
'551699785':{'en': 'Vivo'},
'551699786':{'en': 'Vivo'},
'551699787':{'en': 'Vivo'},
'551699788':{'en': 'Vivo'},
'551699791':{'en': 'Vivo'},
'551699792':{'en': 'Vivo'},
'551699793':{'en': 'Vivo'},
'551699794':{'en': 'Vivo'},
'551699796':{'en': 'Vivo'},
'551699961':{'en': 'Vivo'},
'551699962':{'en': 'Vivo'},
'551699963':{'en': 'Vivo'},
'551699964':{'en': 'Vivo'},
'551699975':{'en': 'Vivo'},
'551699991':{'en': 'Vivo'},
'551699992':{'en': 'Vivo'},
'551699993':{'en': 'Vivo'},
'551699994':{'en': 'Vivo'},
'551798111':{'en': 'TIM'},
'551798112':{'en': 'TIM'},
'551798113':{'en': 'TIM'},
'551798114':{'en': 'TIM'},
'551798115':{'en': 'TIM'},
'551798116':{'en': 'TIM'},
'551798117':{'en': 'TIM'},
'551798118':{'en': 'TIM'},
'551798119':{'en': 'TIM'},
'551798121':{'en': 'TIM'},
'551798122':{'en': 'TIM'},
'551798123':{'en': 'TIM'},
'551798124':{'en': 'TIM'},
'551798125':{'en': 'TIM'},
'551798126':{'en': 'TIM'},
'551798127':{'en': 'TIM'},
'551798128':{'en': 'TIM'},
'551798129':{'en': 'TIM'},
'551798131':{'en': 'TIM'},
'551798132':{'en': 'TIM'},
'551798133':{'en': 'TIM'},
'551798134':{'en': 'TIM'},
'551798135':{'en': 'TIM'},
'551798136':{'en': 'TIM'},
'551798137':{'en': 'TIM'},
'551798138':{'en': 'TIM'},
'551798139':{'en': 'TIM'},
'551798141':{'en': 'TIM'},
'551798142':{'en': 'TIM'},
'551798143':{'en': 'TIM'},
'551798144':{'en': 'TIM'},
'551798145':{'en': 'TIM'},
'551798146':{'en': 'TIM'},
'551798147':{'en': 'TIM'},
'551798148':{'en': 'TIM'},
'551798149':{'en': 'TIM'},
'551798151':{'en': 'TIM'},
'551798152':{'en': 'TIM'},
'551798153':{'en': 'TIM'},
'551798154':{'en': 'TIM'},
'551798155':{'en': 'TIM'},
'551798156':{'en': 'TIM'},
'551798803':{'en': 'Oi'},
'551798804':{'en': 'Oi'},
'551798805':{'en': 'Oi'},
'551798806':{'en': 'Oi'},
'551798807':{'en': 'Oi'},
'551798808':{'en': 'Oi'},
'551798809':{'en': 'Oi'},
'551798810':{'en': 'Oi'},
'551798811':{'en': 'Oi'},
'551798812':{'en': 'Oi'},
'551798813':{'en': 'Oi'},
'5517991':{'en': 'Claro BR'},
'55179920':{'en': 'Claro BR'},
'55179921':{'en': 'Claro BR'},
'55179922':{'en': 'Claro BR'},
'551799230':{'en': 'Claro BR'},
'551799231':{'en': 'Claro BR'},
'551799232':{'en': 'Claro BR'},
'551799233':{'en': 'Claro BR'},
'551799234':{'en': 'Claro BR'},
'551799235':{'en': 'Claro BR'},
'551799236':{'en': 'Claro BR'},
'551799601':{'en': 'Vivo'},
'551799602':{'en': 'Vivo'},
'551799603':{'en': 'Vivo'},
'551799604':{'en': 'Vivo'},
'551799605':{'en': 'Vivo'},
'551799606':{'en': 'Vivo'},
'551799607':{'en': 'Vivo'},
'551799608':{'en': 'Vivo'},
'551799609':{'en': 'Vivo'},
'551799611':{'en': 'Vivo'},
'551799612':{'en': 'Vivo'},
'551799613':{'en': 'Vivo'},
'551799614':{'en': 'Vivo'},
'551799615':{'en': 'Vivo'},
'551799616':{'en': 'Vivo'},
'551799617':{'en': 'Vivo'},
'551799618':{'en': 'Vivo'},
'551799619':{'en': 'Vivo'},
'551799621':{'en': 'Vivo'},
'551799622':{'en': 'Vivo'},
'551799623':{'en': 'Vivo'},
'551799624':{'en': 'Vivo'},
'551799625':{'en': 'Vivo'},
'551799626':{'en': 'Vivo'},
'551799627':{'en': 'Vivo'},
'551799628':{'en': 'Vivo'},
'551799629':{'en': 'Vivo'},
'551799631':{'en': 'Vivo'},
'551799632':{'en': 'Vivo'},
'551799633':{'en': 'Vivo'},
'551799634':{'en': 'Vivo'},
'551799635':{'en': 'Vivo'},
'551799636':{'en': 'Vivo'},
'551799637':{'en': 'Vivo'},
'551799638':{'en': 'Vivo'},
'551799639':{'en': 'Vivo'},
'551799641':{'en': 'Vivo'},
'551799642':{'en': 'Vivo'},
'551799643':{'en': 'Vivo'},
'551799644':{'en': 'Vivo'},
'551799645':{'en': 'Vivo'},
'551799646':{'en': 'Vivo'},
'551799701':{'en': 'Vivo'},
'551799702':{'en': 'Vivo'},
'551799703':{'en': 'Vivo'},
'551799704':{'en': 'Vivo'},
'551799705':{'en': 'Vivo'},
'551799706':{'en': 'Vivo'},
'551799707':{'en': 'Vivo'},
'551799708':{'en': 'Vivo'},
'551799709':{'en': 'Vivo'},
'551799711':{'en': 'Vivo'},
'551799712':{'en': 'Vivo'},
'551799713':{'en': 'Vivo'},
'551799714':{'en': 'Vivo'},
'551799715':{'en': 'Vivo'},
'551799716':{'en': 'Vivo'},
'551799717':{'en': 'Vivo'},
'551799718':{'en': 'Vivo'},
'551799719':{'en': 'Vivo'},
'551799721':{'en': 'Vivo'},
'551799722':{'en': 'Vivo'},
'551799723':{'en': 'Vivo'},
'551799724':{'en': 'Vivo'},
'551799725':{'en': 'Vivo'},
'551799726':{'en': 'Vivo'},
'551799727':{'en': 'Vivo'},
'551799728':{'en': 'Vivo'},
'551799729':{'en': 'Vivo'},
'551799731':{'en': 'Vivo'},
'551799732':{'en': 'Vivo'},
'551799733':{'en': 'Vivo'},
'551799734':{'en': 'Vivo'},
'551799735':{'en': 'Vivo'},
'551799736':{'en': 'Vivo'},
'551799737':{'en': 'Vivo'},
'551799738':{'en': 'Vivo'},
'551799739':{'en': 'Vivo'},
'551799741':{'en': 'Vivo'},
'551799742':{'en': 'Vivo'},
'551799743':{'en': 'Vivo'},
'551799744':{'en': 'Vivo'},
'551799745':{'en': 'Vivo'},
'551799746':{'en': 'Vivo'},
'551799747':{'en': 'Vivo'},
'551799748':{'en': 'Vivo'},
'551799749':{'en': 'Vivo'},
'551799751':{'en': 'Vivo'},
'551799752':{'en': 'Vivo'},
'551799753':{'en': 'Vivo'},
'551799754':{'en': 'Vivo'},
'551799755':{'en': 'Vivo'},
'551799756':{'en': 'Vivo'},
'551799757':{'en': 'Vivo'},
'551799758':{'en': 'Vivo'},
'551799759':{'en': 'Vivo'},
'551799761':{'en': 'Vivo'},
'551799762':{'en': 'Vivo'},
'551799763':{'en': 'Vivo'},
'551799764':{'en': 'Vivo'},
'551799765':{'en': 'Vivo'},
'551799766':{'en': 'Vivo'},
'551799767':{'en': 'Vivo'},
'551799768':{'en': 'Vivo'},
'551799769':{'en': 'Vivo'},
'551799771':{'en': 'Vivo'},
'551799772':{'en': 'Vivo'},
'551799773':{'en': 'Vivo'},
'551799774':{'en': 'Vivo'},
'551799775':{'en': 'Vivo'},
'551799776':{'en': 'Vivo'},
'551799777':{'en': 'Vivo'},
'551799778':{'en': 'Vivo'},
'551799779':{'en': 'Vivo'},
'551799780':{'en': 'Vivo'},
'551799783':{'en': 'Vivo'},
'551799784':{'en': 'Vivo'},
'551799785':{'en': 'Vivo'},
'551799791':{'en': 'Vivo'},
'551898111':{'en': 'TIM'},
'551898112':{'en': 'TIM'},
'551898113':{'en': 'TIM'},
'551898114':{'en': 'TIM'},
'551898115':{'en': 'TIM'},
'551898116':{'en': 'TIM'},
'551898117':{'en': 'TIM'},
'551898118':{'en': 'TIM'},
'551898119':{'en': 'TIM'},
'551898121':{'en': 'TIM'},
'551898122':{'en': 'TIM'},
'551898123':{'en': 'TIM'},
'551898124':{'en': 'TIM'},
'551898125':{'en': 'TIM'},
'551898126':{'en': 'TIM'},
'551898127':{'en': 'TIM'},
'551898128':{'en': 'TIM'},
'551898129':{'en': 'TIM'},
'551898131':{'en': 'TIM'},
'551898132':{'en': 'TIM'},
'551898133':{'en': 'TIM'},
'551898134':{'en': 'TIM'},
'551898135':{'en': 'TIM'},
'551898136':{'en': 'TIM'},
'551898137':{'en': 'TIM'},
'551898138':{'en': 'TIM'},
'551898139':{'en': 'TIM'},
'551898141':{'en': 'TIM'},
'551898142':{'en': 'TIM'},
'551898143':{'en': 'TIM'},
'551898144':{'en': 'TIM'},
'551898145':{'en': 'TIM'},
'551898146':{'en': 'TIM'},
'551898147':{'en': 'TIM'},
'551898148':{'en': 'TIM'},
'551898149':{'en': 'TIM'},
'551898151':{'en': 'TIM'},
'551898810':{'en': 'Oi'},
'551898811':{'en': 'Oi'},
'55189910':{'en': 'Claro BR'},
'55189911':{'en': 'Claro BR'},
'55189912':{'en': 'Claro BR'},
'55189913':{'en': 'Claro BR'},
'55189914':{'en': 'Claro BR'},
'55189915':{'en': 'Claro BR'},
'55189916':{'en': 'Claro BR'},
'55189917':{'en': 'Claro BR'},
'551899180':{'en': 'Claro BR'},
'551899197':{'en': 'Claro BR'},
'551899198':{'en': 'Claro BR'},
'551899199':{'en': 'Claro BR'},
'551899601':{'en': 'Vivo'},
'551899602':{'en': 'Vivo'},
'551899603':{'en': 'Vivo'},
'551899604':{'en': 'Vivo'},
'551899605':{'en': 'Vivo'},
'551899606':{'en': 'Vivo'},
'551899607':{'en': 'Vivo'},
'551899608':{'en': 'Vivo'},
'551899609':{'en': 'Vivo'},
'551899611':{'en': 'Vivo'},
'551899612':{'en': 'Vivo'},
'551899613':{'en': 'Vivo'},
'551899614':{'en': 'Vivo'},
'551899615':{'en': 'Vivo'},
'551899616':{'en': 'Vivo'},
'551899617':{'en': 'Vivo'},
'551899618':{'en': 'Vivo'},
'551899621':{'en': 'Vivo'},
'551899622':{'en': 'Vivo'},
'551899623':{'en': 'Vivo'},
'551899624':{'en': 'Vivo'},
'551899625':{'en': 'Vivo'},
'551899626':{'en': 'Vivo'},
'551899627':{'en': 'Vivo'},
'551899628':{'en': 'Vivo'},
'551899629':{'en': 'Vivo'},
'551899631':{'en': 'Vivo'},
'551899632':{'en': 'Vivo'},
'551899633':{'en': 'Vivo'},
'551899634':{'en': 'Vivo'},
'551899635':{'en': 'Vivo'},
'551899636':{'en': 'Vivo'},
'551899637':{'en': 'Vivo'},
'551899638':{'en': 'Vivo'},
'551899639':{'en': 'Vivo'},
'551899641':{'en': 'Vivo'},
'551899642':{'en': 'Vivo'},
'551899643':{'en': 'Vivo'},
'551899644':{'en': 'Vivo'},
'551899645':{'en': 'Vivo'},
'551899646':{'en': 'Vivo'},
'551899647':{'en': 'Vivo'},
'551899648':{'en': 'Vivo'},
'551899649':{'en': 'Vivo'},
'551899651':{'en': 'Vivo'},
'551899652':{'en': 'Vivo'},
'551899653':{'en': 'Vivo'},
'551899654':{'en': 'Vivo'},
'551899655':{'en': 'Vivo'},
'551899656':{'en': 'Vivo'},
'551899657':{'en': 'Vivo'},
'551899658':{'en': 'Vivo'},
'551899659':{'en': 'Vivo'},
'551899661':{'en': 'Vivo'},
'551899662':{'en': 'Vivo'},
'551899663':{'en': 'Vivo'},
'551899664':{'en': 'Vivo'},
'551899665':{'en': 'Vivo'},
'551899666':{'en': 'Vivo'},
'551899667':{'en': 'Vivo'},
'551899668':{'en': 'Vivo'},
'551899669':{'en': 'Vivo'},
'551899671':{'en': 'Vivo'},
'551899672':{'en': 'Vivo'},
'551899673':{'en': 'Vivo'},
'551899674':{'en': 'Vivo'},
'551899675':{'en': 'Vivo'},
'551899676':{'en': 'Vivo'},
'551899677':{'en': 'Vivo'},
'551899678':{'en': 'Vivo'},
'551899679':{'en': 'Vivo'},
'551899681':{'en': 'Vivo'},
'551899682':{'en': 'Vivo'},
'551899683':{'en': 'Vivo'},
'551899684':{'en': 'Vivo'},
'551899685':{'en': 'Vivo'},
'551899686':{'en': 'Vivo'},
'551899687':{'en': 'Vivo'},
'551899701':{'en': 'Vivo'},
'551899702':{'en': 'Vivo'},
'551899703':{'en': 'Vivo'},
'551899704':{'en': 'Vivo'},
'551899705':{'en': 'Vivo'},
'551899706':{'en': 'Vivo'},
'551899707':{'en': 'Vivo'},
'551899708':{'en': 'Vivo'},
'551899709':{'en': 'Vivo'},
'551899711':{'en': 'Vivo'},
'551899712':{'en': 'Vivo'},
'551899713':{'en': 'Vivo'},
'551899714':{'en': 'Vivo'},
'551899715':{'en': 'Vivo'},
'551899716':{'en': 'Vivo'},
'551899717':{'en': 'Vivo'},
'551899718':{'en': 'Vivo'},
'551899719':{'en': 'Vivo'},
'551899721':{'en': 'Vivo'},
'551899722':{'en': 'Vivo'},
'551899723':{'en': 'Vivo'},
'551899724':{'en': 'Vivo'},
'551899725':{'en': 'Vivo'},
'551899726':{'en': 'Vivo'},
'551899727':{'en': 'Vivo'},
'551899728':{'en': 'Vivo'},
'551899729':{'en': 'Vivo'},
'551899731':{'en': 'Vivo'},
'551899732':{'en': 'Vivo'},
'551899733':{'en': 'Vivo'},
'551899734':{'en': 'Vivo'},
'551899735':{'en': 'Vivo'},
'551899736':{'en': 'Vivo'},
'551899737':{'en': 'Vivo'},
'551899738':{'en': 'Vivo'},
'551899739':{'en': 'Vivo'},
'551899741':{'en': 'Vivo'},
'551899742':{'en': 'Vivo'},
'551899743':{'en': 'Vivo'},
'551899744':{'en': 'Vivo'},
'551899745':{'en': 'Vivo'},
'551899746':{'en': 'Vivo'},
'551899747':{'en': 'Vivo'},
'551899748':{'en': 'Vivo'},
'551899749':{'en': 'Vivo'},
'551899751':{'en': 'Vivo'},
'551899752':{'en': 'Vivo'},
'551899753':{'en': 'Vivo'},
'551899754':{'en': 'Vivo'},
'551899755':{'en': 'Vivo'},
'551899756':{'en': 'Vivo'},
'551899757':{'en': 'Vivo'},
'551899758':{'en': 'Vivo'},
'551899759':{'en': 'Vivo'},
'551899761':{'en': 'Vivo'},
'551899762':{'en': 'Vivo'},
'551899763':{'en': 'Vivo'},
'551899764':{'en': 'Vivo'},
'551899765':{'en': 'Vivo'},
'551899766':{'en': 'Vivo'},
'551899767':{'en': 'Vivo'},
'551899768':{'en': 'Vivo'},
'551899771':{'en': 'Vivo'},
'551899772':{'en': 'Vivo'},
'551899773':{'en': 'Vivo'},
'551899774':{'en': 'Vivo'},
'551899775':{'en': 'Vivo'},
'551899776':{'en': 'Vivo'},
'551899777':{'en': 'Vivo'},
'551899778':{'en': 'Vivo'},
'551899779':{'en': 'Vivo'},
'55189978':{'en': 'Vivo'},
'551899791':{'en': 'Vivo'},
'551899792':{'en': 'Vivo'},
'551899793':{'en': 'Vivo'},
'551899794':{'en': 'Vivo'},
'551899795':{'en': 'Vivo'},
'551899796':{'en': 'Vivo'},
'551899797':{'en': 'Vivo'},
'551899798':{'en': 'Vivo'},
'551899799':{'en': 'Vivo'},
'5519981':{'en': 'TIM'},
'551998201':{'en': 'TIM'},
'551998202':{'en': 'TIM'},
'551998203':{'en': 'TIM'},
'551998204':{'en': 'TIM'},
'551998205':{'en': 'TIM'},
'551998206':{'en': 'TIM'},
'551998207':{'en': 'TIM'},
'551998208':{'en': 'TIM'},
'551998209':{'en': 'TIM'},
'551998211':{'en': 'TIM'},
'551998212':{'en': 'TIM'},
'551998213':{'en': 'TIM'},
'551998214':{'en': 'TIM'},
'551998215':{'en': 'TIM'},
'551998216':{'en': 'TIM'},
'551998217':{'en': 'TIM'},
'551998218':{'en': 'TIM'},
'551998219':{'en': 'TIM'},
'551998221':{'en': 'TIM'},
'551998222':{'en': 'TIM'},
'551998223':{'en': 'TIM'},
'551998224':{'en': 'TIM'},
'551998225':{'en': 'TIM'},
'551998226':{'en': 'TIM'},
'551998227':{'en': 'TIM'},
'551998229':{'en': 'TIM'},
'5519991':{'en': 'Claro BR'},
'5519992':{'en': 'Claro BR'},
'5519993':{'en': 'Claro BR'},
'5519994':{'en': 'Claro BR'},
'551999500':{'en': 'Claro BR'},
'551999501':{'en': 'Claro BR'},
'551999502':{'en': 'Claro BR'},
'551999503':{'en': 'Claro BR'},
'551999504':{'en': 'Claro BR'},
'551999505':{'en': 'Claro BR'},
'551999506':{'en': 'Claro BR'},
'551999507':{'en': 'Claro BR'},
'551999508':{'en': 'Claro BR'},
'551999601':{'en': 'Vivo'},
'551999602':{'en': 'Vivo'},
'551999603':{'en': 'Vivo'},
'551999604':{'en': 'Vivo'},
'551999605':{'en': 'Vivo'},
'551999606':{'en': 'Vivo'},
'551999607':{'en': 'Vivo'},
'551999608':{'en': 'Vivo'},
'551999609':{'en': 'Vivo'},
'55199961':{'en': 'Vivo'},
'551999621':{'en': 'Vivo'},
'551999622':{'en': 'Vivo'},
'551999623':{'en': 'Vivo'},
'551999624':{'en': 'Vivo'},
'551999625':{'en': 'Vivo'},
'551999626':{'en': 'Vivo'},
'551999627':{'en': 'Vivo'},
'551999628':{'en': 'Vivo'},
'551999629':{'en': 'Vivo'},
'551999631':{'en': 'Vivo'},
'551999632':{'en': 'Vivo'},
'551999633':{'en': 'Vivo'},
'551999634':{'en': 'Vivo'},
'551999635':{'en': 'Vivo'},
'551999636':{'en': 'Vivo'},
'551999637':{'en': 'Vivo'},
'551999638':{'en': 'Vivo'},
'551999639':{'en': 'Vivo'},
'551999641':{'en': 'Vivo'},
'551999642':{'en': 'Vivo'},
'551999643':{'en': 'Vivo'},
'551999644':{'en': 'Vivo'},
'551999645':{'en': 'Vivo'},
'551999646':{'en': 'Vivo'},
'551999647':{'en': 'Vivo'},
'551999648':{'en': 'Vivo'},
'551999649':{'en': 'Vivo'},
'551999651':{'en': 'Vivo'},
'551999652':{'en': 'Vivo'},
'551999653':{'en': 'Vivo'},
'551999654':{'en': 'Vivo'},
'551999655':{'en': 'Vivo'},
'551999656':{'en': 'Vivo'},
'551999657':{'en': 'Vivo'},
'551999658':{'en': 'Vivo'},
'551999659':{'en': 'Vivo'},
'551999661':{'en': 'Vivo'},
'551999662':{'en': 'Vivo'},
'551999663':{'en': 'Vivo'},
'551999664':{'en': 'Vivo'},
'551999665':{'en': 'Vivo'},
'551999666':{'en': 'Vivo'},
'551999667':{'en': 'Vivo'},
'551999668':{'en': 'Vivo'},
'551999669':{'en': 'Vivo'},
'551999671':{'en': 'Vivo'},
'551999672':{'en': 'Vivo'},
'551999673':{'en': 'Vivo'},
'551999674':{'en': 'Vivo'},
'551999675':{'en': 'Vivo'},
'551999676':{'en': 'Vivo'},
'551999677':{'en': 'Vivo'},
'551999678':{'en': 'Vivo'},
'551999679':{'en': 'Vivo'},
'551999681':{'en': 'Vivo'},
'551999682':{'en': 'Vivo'},
'551999683':{'en': 'Vivo'},
'551999684':{'en': 'Vivo'},
'551999685':{'en': 'Vivo'},
'551999686':{'en': 'Vivo'},
'551999687':{'en': 'Vivo'},
'551999688':{'en': 'Vivo'},
'551999689':{'en': 'Vivo'},
'551999691':{'en': 'Vivo'},
'551999692':{'en': 'Vivo'},
'551999693':{'en': 'Vivo'},
'551999694':{'en': 'Vivo'},
'551999695':{'en': 'Vivo'},
'551999696':{'en': 'Vivo'},
'551999697':{'en': 'Vivo'},
'551999698':{'en': 'Vivo'},
'551999699':{'en': 'Vivo'},
'55199970':{'en': 'Vivo'},
'55199971':{'en': 'Vivo'},
'55199972':{'en': 'Vivo'},
'55199973':{'en': 'Vivo'},
'55199974':{'en': 'Vivo'},
'551999751':{'en': 'Vivo'},
'551999752':{'en': 'Vivo'},
'551999753':{'en': 'Vivo'},
'551999754':{'en': 'Vivo'},
'551999755':{'en': 'Vivo'},
'551999756':{'en': 'Vivo'},
'551999757':{'en': 'Vivo'},
'551999758':{'en': 'Vivo'},
'551999759':{'en': 'Vivo'},
'551999761':{'en': 'Vivo'},
'551999762':{'en': 'Vivo'},
'551999763':{'en': 'Vivo'},
'551999764':{'en': 'Vivo'},
'551999765':{'en': 'Vivo'},
'551999766':{'en': 'Vivo'},
'551999767':{'en': 'Vivo'},
'551999768':{'en': 'Vivo'},
'551999769':{'en': 'Vivo'},
'551999771':{'en': 'Vivo'},
'551999772':{'en': 'Vivo'},
'551999773':{'en': 'Vivo'},
'551999774':{'en': 'Vivo'},
'551999775':{'en': 'Vivo'},
'551999776':{'en': 'Vivo'},
'551999777':{'en': 'Vivo'},
'551999778':{'en': 'Vivo'},
'551999779':{'en': 'Vivo'},
'55199978':{'en': 'Vivo'},
'55199979':{'en': 'Vivo'},
'55199980':{'en': 'Vivo'},
'55199981':{'en': 'Vivo'},
'55199982':{'en': 'Vivo'},
'55199983':{'en': 'Vivo'},
'55199984':{'en': 'Vivo'},
'55199985':{'en': 'Vivo'},
'55199986':{'en': 'Vivo'},
'55199987':{'en': 'Vivo'},
'55199988':{'en': 'Vivo'},
'551999890':{'en': 'Vivo'},
'5521971':{'en': 'Vivo'},
'5521972':{'en': 'Vivo'},
'55219730':{'en': 'Claro BR'},
'55219731':{'en': 'Claro BR'},
'55219732':{'en': 'Claro BR'},
'55219733':{'en': 'Claro BR'},
'55219734':{'en': 'Claro BR'},
'55219735':{'en': 'Claro BR'},
'55219736':{'en': 'Claro BR'},
'552197370':{'en': 'Claro BR'},
'552197371':{'en': 'Claro BR'},
'552197372':{'en': 'Claro BR'},
'552197373':{'en': 'Claro BR'},
'5521974':{'en': 'Claro BR'},
'5521975':{'en': 'Claro BR'},
'5521976':{'en': 'Claro BR'},
'5521981':{'en': 'TIM'},
'5521982':{'en': 'TIM'},
'552198301':{'en': 'TIM'},
'552198302':{'en': 'TIM'},
'552198303':{'en': 'TIM'},
'552198304':{'en': 'TIM'},
'552198305':{'en': 'TIM'},
'552198306':{'en': 'TIM'},
'552198307':{'en': 'TIM'},
'552198308':{'en': 'TIM'},
'552198309':{'en': 'TIM'},
'552198311':{'en': 'TIM'},
'552198312':{'en': 'TIM'},
'552198313':{'en': 'TIM'},
'552198314':{'en': 'TIM'},
'552198315':{'en': 'TIM'},
'552198316':{'en': 'TIM'},
'552198317':{'en': 'TIM'},
'552198318':{'en': 'TIM'},
'552198319':{'en': 'TIM'},
'552198321':{'en': 'TIM'},
'552198322':{'en': 'TIM'},
'552198323':{'en': 'TIM'},
'552198324':{'en': 'TIM'},
'552198325':{'en': 'TIM'},
'552198326':{'en': 'TIM'},
'552198327':{'en': 'TIM'},
'552198328':{'en': 'TIM'},
'552198329':{'en': 'TIM'},
'552198331':{'en': 'TIM'},
'552198332':{'en': 'TIM'},
'552198333':{'en': 'TIM'},
'552198334':{'en': 'TIM'},
'552198335':{'en': 'TIM'},
'552198336':{'en': 'TIM'},
'552198337':{'en': 'TIM'},
'552198338':{'en': 'TIM'},
'552198339':{'en': 'TIM'},
'552198341':{'en': 'TIM'},
'552198342':{'en': 'TIM'},
'552198343':{'en': 'TIM'},
'552198344':{'en': 'TIM'},
'552198345':{'en': 'TIM'},
'552198346':{'en': 'TIM'},
'552198347':{'en': 'TIM'},
'552198348':{'en': 'TIM'},
'552198349':{'en': 'TIM'},
'552198351':{'en': 'TIM'},
'552198352':{'en': 'TIM'},
'552198353':{'en': 'TIM'},
'552198354':{'en': 'TIM'},
'552198355':{'en': 'TIM'},
'552198356':{'en': 'TIM'},
'552198357':{'en': 'TIM'},
'552198358':{'en': 'TIM'},
'552198359':{'en': 'TIM'},
'552198361':{'en': 'TIM'},
'552198362':{'en': 'TIM'},
'552198363':{'en': 'TIM'},
'552198364':{'en': 'TIM'},
'552198365':{'en': 'TIM'},
'552198366':{'en': 'TIM'},
'552198367':{'en': 'TIM'},
'552198368':{'en': 'TIM'},
'552198369':{'en': 'TIM'},
'552198371':{'en': 'TIM'},
'552198372':{'en': 'TIM'},
'552198373':{'en': 'TIM'},
'552198374':{'en': 'TIM'},
'552198375':{'en': 'TIM'},
'552198376':{'en': 'TIM'},
'552198377':{'en': 'TIM'},
'552198378':{'en': 'TIM'},
'552198379':{'en': 'TIM'},
'552198381':{'en': 'TIM'},
'552198382':{'en': 'TIM'},
'552198383':{'en': 'TIM'},
'552198384':{'en': 'TIM'},
'552198385':{'en': 'TIM'},
'552198386':{'en': 'TIM'},
'552198401':{'en': 'Oi'},
'552198402':{'en': 'Oi'},
'552198403':{'en': 'Oi'},
'552198404':{'en': 'Oi'},
'552198405':{'en': 'Oi'},
'552198406':{'en': 'Oi'},
'552198407':{'en': 'Oi'},
'552198408':{'en': 'Oi'},
'552198409':{'en': 'Oi'},
'552198411':{'en': 'Oi'},
'552198412':{'en': 'Oi'},
'552198413':{'en': 'Oi'},
'552198414':{'en': 'Oi'},
'552198415':{'en': 'Oi'},
'552198416':{'en': 'Oi'},
'552198417':{'en': 'Oi'},
'552198418':{'en': 'Oi'},
'552198419':{'en': 'Oi'},
'5521985':{'en': 'Oi'},
'5521986':{'en': 'Oi'},
'5521987':{'en': 'Oi'},
'5521988':{'en': 'Oi'},
'5521989':{'en': 'Oi'},
'5521991':{'en': 'Claro BR'},
'5521992':{'en': 'Claro BR'},
'5521993':{'en': 'Claro BR'},
'5521994':{'en': 'Claro BR'},
'5521995':{'en': 'Vivo'},
'5521996':{'en': 'Vivo'},
'5521997':{'en': 'Vivo'},
'5521998':{'en': 'Vivo'},
'5521999':{'en': 'Vivo'},
'552298111':{'en': 'TIM'},
'552298112':{'en': 'TIM'},
'552298113':{'en': 'TIM'},
'552298114':{'en': 'TIM'},
'552298115':{'en': 'TIM'},
'552298116':{'en': 'TIM'},
'552298117':{'en': 'TIM'},
'552298118':{'en': 'TIM'},
'552298119':{'en': 'TIM'},
'552298121':{'en': 'TIM'},
'552298122':{'en': 'TIM'},
'552298123':{'en': 'TIM'},
'552298124':{'en': 'TIM'},
'552298125':{'en': 'TIM'},
'552298126':{'en': 'TIM'},
'552298127':{'en': 'TIM'},
'552298128':{'en': 'TIM'},
'552298129':{'en': 'TIM'},
'552298131':{'en': 'TIM'},
'552298132':{'en': 'TIM'},
'552298133':{'en': 'TIM'},
'552298134':{'en': 'TIM'},
'552298135':{'en': 'TIM'},
'552298136':{'en': 'TIM'},
'552298137':{'en': 'TIM'},
'552298138':{'en': 'TIM'},
'552298139':{'en': 'TIM'},
'552298141':{'en': 'TIM'},
'552298142':{'en': 'TIM'},
'552298143':{'en': 'TIM'},
'552298144':{'en': 'TIM'},
'552298145':{'en': 'TIM'},
'552298146':{'en': 'TIM'},
'552298147':{'en': 'TIM'},
'552298148':{'en': 'TIM'},
'552298149':{'en': 'TIM'},
'552298151':{'en': 'TIM'},
'5522985':{'en': 'Oi'},
'5522986':{'en': 'Oi'},
'5522987':{'en': 'Oi'},
'5522988':{'en': 'Oi'},
'5522989':{'en': 'Oi'},
'552299101':{'en': 'Claro BR'},
'552299102':{'en': 'Claro BR'},
'552299103':{'en': 'Claro BR'},
'552299104':{'en': 'Claro BR'},
'552299105':{'en': 'Claro BR'},
'552299201':{'en': 'Claro BR'},
'552299202':{'en': 'Claro BR'},
'552299203':{'en': 'Claro BR'},
'552299204':{'en': 'Claro BR'},
'552299205':{'en': 'Claro BR'},
'552299206':{'en': 'Claro BR'},
'552299207':{'en': 'Claro BR'},
'552299208':{'en': 'Claro BR'},
'552299209':{'en': 'Claro BR'},
'552299211':{'en': 'Claro BR'},
'552299212':{'en': 'Claro BR'},
'552299213':{'en': 'Claro BR'},
'552299214':{'en': 'Claro BR'},
'552299215':{'en': 'Claro BR'},
'552299216':{'en': 'Claro BR'},
'552299217':{'en': 'Claro BR'},
'552299218':{'en': 'Claro BR'},
'552299219':{'en': 'Claro BR'},
'552299221':{'en': 'Claro BR'},
'552299222':{'en': 'Claro BR'},
'552299223':{'en': 'Claro BR'},
'552299224':{'en': 'Claro BR'},
'552299225':{'en': 'Claro BR'},
'552299226':{'en': 'Claro BR'},
'552299227':{'en': 'Claro BR'},
'552299228':{'en': 'Claro BR'},
'552299229':{'en': 'Claro BR'},
'552299231':{'en': 'Claro BR'},
'552299232':{'en': 'Claro BR'},
'552299233':{'en': 'Claro BR'},
'552299234':{'en': 'Claro BR'},
'552299235':{'en': 'Claro BR'},
'552299236':{'en': 'Claro BR'},
'552299237':{'en': 'Claro BR'},
'552299238':{'en': 'Claro BR'},
'552299239':{'en': 'Claro BR'},
'552299241':{'en': 'Claro BR'},
'552299242':{'en': 'Claro BR'},
'552299243':{'en': 'Claro BR'},
'552299244':{'en': 'Claro BR'},
'552299245':{'en': 'Claro BR'},
'552299246':{'en': 'Claro BR'},
'552299247':{'en': 'Claro BR'},
'552299248':{'en': 'Claro BR'},
'552299249':{'en': 'Claro BR'},
'552299251':{'en': 'Claro BR'},
'552299252':{'en': 'Claro BR'},
'552299253':{'en': 'Claro BR'},
'552299254':{'en': 'Claro BR'},
'552299255':{'en': 'Claro BR'},
'552299256':{'en': 'Claro BR'},
'552299257':{'en': 'Claro BR'},
'552299258':{'en': 'Claro BR'},
'552299259':{'en': 'Claro BR'},
'552299261':{'en': 'Claro BR'},
'552299262':{'en': 'Claro BR'},
'552299263':{'en': 'Claro BR'},
'552299264':{'en': 'Claro BR'},
'552299265':{'en': 'Claro BR'},
'552299266':{'en': 'Claro BR'},
'552299267':{'en': 'Claro BR'},
'552299268':{'en': 'Claro BR'},
'552299269':{'en': 'Claro BR'},
'552299271':{'en': 'Claro BR'},
'552299272':{'en': 'Claro BR'},
'552299273':{'en': 'Claro BR'},
'552299274':{'en': 'Claro BR'},
'552299275':{'en': 'Claro BR'},
'552299276':{'en': 'Claro BR'},
'552299277':{'en': 'Claro BR'},
'552299278':{'en': 'Claro BR'},
'552299279':{'en': 'Claro BR'},
'552299281':{'en': 'Claro BR'},
'552299282':{'en': 'Claro BR'},
'552299283':{'en': 'Claro BR'},
'552299284':{'en': 'Claro BR'},
'552299285':{'en': 'Claro BR'},
'552299286':{'en': 'Claro BR'},
'552299287':{'en': 'Claro BR'},
'552299288':{'en': 'Claro BR'},
'552299289':{'en': 'Claro BR'},
'55229970':{'en': 'Vivo'},
'55229971':{'en': 'Vivo'},
'55229972':{'en': 'Vivo'},
'55229973':{'en': 'Vivo'},
'55229974':{'en': 'Vivo'},
'55229975':{'en': 'Vivo'},
'552299760':{'en': 'Vivo'},
'552299761':{'en': 'Vivo'},
'552299762':{'en': 'Vivo'},
'552299763':{'en': 'Vivo'},
'552299764':{'en': 'Vivo'},
'552299765':{'en': 'Vivo'},
'552299766':{'en': 'Vivo'},
'552299767':{'en': 'Vivo'},
'5522998':{'en': 'Vivo'},
'5522999':{'en': 'Vivo'},
'552498111':{'en': 'TIM'},
'552498112':{'en': 'TIM'},
'552498113':{'en': 'TIM'},
'552498114':{'en': 'TIM'},
'552498115':{'en': 'TIM'},
'552498116':{'en': 'TIM'},
'552498117':{'en': 'TIM'},
'552498118':{'en': 'TIM'},
'552498119':{'en': 'TIM'},
'552498121':{'en': 'TIM'},
'552498122':{'en': 'TIM'},
'552498123':{'en': 'TIM'},
'552498124':{'en': 'TIM'},
'552498125':{'en': 'TIM'},
'552498126':{'en': 'TIM'},
'552498127':{'en': 'TIM'},
'552498128':{'en': 'TIM'},
'552498129':{'en': 'TIM'},
'552498131':{'en': 'TIM'},
'552498132':{'en': 'TIM'},
'552498133':{'en': 'TIM'},
'552498134':{'en': 'TIM'},
'552498135':{'en': 'TIM'},
'552498136':{'en': 'TIM'},
'552498137':{'en': 'TIM'},
'552498138':{'en': 'TIM'},
'552498139':{'en': 'TIM'},
'552498141':{'en': 'TIM'},
'552498142':{'en': 'TIM'},
'552498143':{'en': 'TIM'},
'552498144':{'en': 'TIM'},
'552498145':{'en': 'TIM'},
'552498182':{'en': 'TIM'},
'5524985':{'en': 'Oi'},
'5524986':{'en': 'Oi'},
'5524987':{'en': 'Oi'},
'5524988':{'en': 'Oi'},
'5524989':{'en': 'Oi'},
'55249920':{'en': 'Claro BR'},
'55249921':{'en': 'Claro BR'},
'55249922':{'en': 'Claro BR'},
'55249923':{'en': 'Claro BR'},
'55249924':{'en': 'Claro BR'},
'55249925':{'en': 'Claro BR'},
'55249926':{'en': 'Claro BR'},
'55249927':{'en': 'Claro BR'},
'552499280':{'en': 'Claro BR'},
'552499281':{'en': 'Claro BR'},
'552499282':{'en': 'Claro BR'},
'552499291':{'en': 'Claro BR'},
'552499292':{'en': 'Claro BR'},
'552499293':{'en': 'Claro BR'},
'552499294':{'en': 'Claro BR'},
'552499295':{'en': 'Claro BR'},
'552499296':{'en': 'Claro BR'},
'552499297':{'en': 'Claro BR'},
'552499298':{'en': 'Claro BR'},
'552499299':{'en': 'Claro BR'},
'552499301':{'en': 'Claro BR'},
'552499395':{'en': 'Claro BR'},
'55249962':{'en': 'Vivo'},
'55249963':{'en': 'Vivo'},
'55249964':{'en': 'Vivo'},
'55249965':{'en': 'Vivo'},
'55249966':{'en': 'Vivo'},
'55249967':{'en': 'Vivo'},
'55249968':{'en': 'Vivo'},
'55249969':{'en': 'Vivo'},
'5524997':{'en': 'Vivo'},
'5524998':{'en': 'Vivo'},
'55249990':{'en': 'Vivo'},
'55249991':{'en': 'Vivo'},
'552499920':{'en': 'Vivo'},
'552499921':{'en': 'Vivo'},
'552499922':{'en': 'Vivo'},
'552499923':{'en': 'Vivo'},
'552499924':{'en': 'Vivo'},
'552499925':{'en': 'Vivo'},
'55249994':{'en': 'Vivo'},
'55249995':{'en': 'Vivo'},
'55249996':{'en': 'Vivo'},
'55249997':{'en': 'Vivo'},
'55249998':{'en': 'Vivo'},
'55249999':{'en': 'Vivo'},
'552798111':{'en': 'TIM'},
'552798112':{'en': 'TIM'},
'552798113':{'en': 'TIM'},
'552798114':{'en': 'TIM'},
'552798115':{'en': 'TIM'},
'552798116':{'en': 'TIM'},
'552798117':{'en': 'TIM'},
'552798118':{'en': 'TIM'},
'552798119':{'en': 'TIM'},
'552798121':{'en': 'TIM'},
'552798122':{'en': 'TIM'},
'552798123':{'en': 'TIM'},
'552798124':{'en': 'TIM'},
'552798125':{'en': 'TIM'},
'552798126':{'en': 'TIM'},
'552798127':{'en': 'TIM'},
'552798128':{'en': 'TIM'},
'552798129':{'en': 'TIM'},
'552798131':{'en': 'TIM'},
'552798132':{'en': 'TIM'},
'552798133':{'en': 'TIM'},
'552798134':{'en': 'TIM'},
'552798135':{'en': 'TIM'},
'552798136':{'en': 'TIM'},
'552798137':{'en': 'TIM'},
'552798138':{'en': 'TIM'},
'552798139':{'en': 'TIM'},
'552798141':{'en': 'TIM'},
'552798142':{'en': 'TIM'},
'552798143':{'en': 'TIM'},
'552798144':{'en': 'TIM'},
'552798145':{'en': 'TIM'},
'552798146':{'en': 'TIM'},
'552798147':{'en': 'TIM'},
'552798148':{'en': 'TIM'},
'552798149':{'en': 'TIM'},
'552798151':{'en': 'TIM'},
'552798152':{'en': 'TIM'},
'552798153':{'en': 'TIM'},
'552798154':{'en': 'TIM'},
'552798155':{'en': 'TIM'},
'552798156':{'en': 'TIM'},
'552798157':{'en': 'TIM'},
'552798158':{'en': 'TIM'},
'552798159':{'en': 'TIM'},
'552798161':{'en': 'TIM'},
'552798162':{'en': 'TIM'},
'552798163':{'en': 'TIM'},
'552798164':{'en': 'TIM'},
'552798165':{'en': 'TIM'},
'552798166':{'en': 'TIM'},
'552798167':{'en': 'TIM'},
'552798168':{'en': 'TIM'},
'552798169':{'en': 'TIM'},
'552798171':{'en': 'TIM'},
'552798172':{'en': 'TIM'},
'552798173':{'en': 'TIM'},
'552798174':{'en': 'TIM'},
'552798175':{'en': 'TIM'},
'552798176':{'en': 'TIM'},
'552798177':{'en': 'TIM'},
'552798178':{'en': 'TIM'},
'552798182':{'en': 'TIM'},
'5527985':{'en': 'Oi'},
'5527986':{'en': 'Oi'},
'5527987':{'en': 'Oi'},
'5527988':{'en': 'Oi'},
'5527989':{'en': 'Oi'},
'552799201':{'en': 'Claro BR'},
'552799202':{'en': 'Claro BR'},
'552799203':{'en': 'Claro BR'},
'552799204':{'en': 'Claro BR'},
'552799205':{'en': 'Claro BR'},
'552799222':{'en': 'Claro BR'},
'552799223':{'en': 'Claro BR'},
'552799224':{'en': 'Claro BR'},
'552799225':{'en': 'Claro BR'},
'552799226':{'en': 'Claro BR'},
'552799227':{'en': 'Claro BR'},
'552799228':{'en': 'Claro BR'},
'552799229':{'en': 'Claro BR'},
'552799231':{'en': 'Claro BR'},
'552799232':{'en': 'Claro BR'},
'552799233':{'en': 'Claro BR'},
'552799234':{'en': 'Claro BR'},
'552799235':{'en': 'Claro BR'},
'552799236':{'en': 'Claro BR'},
'552799237':{'en': 'Claro BR'},
'552799238':{'en': 'Claro BR'},
'552799239':{'en': 'Claro BR'},
'552799241':{'en': 'Claro BR'},
'552799242':{'en': 'Claro BR'},
'552799243':{'en': 'Claro BR'},
'552799244':{'en': 'Claro BR'},
'552799245':{'en': 'Claro BR'},
'552799246':{'en': 'Claro BR'},
'552799247':{'en': 'Claro BR'},
'552799248':{'en': 'Claro BR'},
'552799249':{'en': 'Claro BR'},
'552799251':{'en': 'Claro BR'},
'552799252':{'en': 'Claro BR'},
'552799253':{'en': 'Claro BR'},
'552799254':{'en': 'Claro BR'},
'552799255':{'en': 'Claro BR'},
'552799256':{'en': 'Claro BR'},
'552799257':{'en': 'Claro BR'},
'552799258':{'en': 'Claro BR'},
'552799259':{'en': 'Claro BR'},
'552799261':{'en': 'Claro BR'},
'552799262':{'en': 'Claro BR'},
'552799263':{'en': 'Claro BR'},
'552799264':{'en': 'Claro BR'},
'552799265':{'en': 'Claro BR'},
'552799266':{'en': 'Claro BR'},
'552799267':{'en': 'Claro BR'},
'552799268':{'en': 'Claro BR'},
'552799269':{'en': 'Claro BR'},
'552799271':{'en': 'Claro BR'},
'552799272':{'en': 'Claro BR'},
'552799273':{'en': 'Claro BR'},
'552799274':{'en': 'Claro BR'},
'552799275':{'en': 'Claro BR'},
'552799276':{'en': 'Claro BR'},
'552799277':{'en': 'Claro BR'},
'552799278':{'en': 'Claro BR'},
'552799279':{'en': 'Claro BR'},
'552799281':{'en': 'Claro BR'},
'552799282':{'en': 'Claro BR'},
'552799283':{'en': 'Claro BR'},
'552799284':{'en': 'Claro BR'},
'552799285':{'en': 'Claro BR'},
'552799286':{'en': 'Claro BR'},
'552799287':{'en': 'Claro BR'},
'552799288':{'en': 'Claro BR'},
'552799289':{'en': 'Claro BR'},
'552799291':{'en': 'Claro BR'},
'552799292':{'en': 'Claro BR'},
'552799293':{'en': 'Claro BR'},
'552799294':{'en': 'Claro BR'},
'552799295':{'en': 'Claro BR'},
'552799296':{'en': 'Claro BR'},
'552799297':{'en': 'Claro BR'},
'552799298':{'en': 'Claro BR'},
'552799299':{'en': 'Claro BR'},
'552799309':{'en': 'Claro BR'},
'552799311':{'en': 'Claro BR'},
'552799312':{'en': 'Claro BR'},
'552799316':{'en': 'Claro BR'},
'55279960':{'en': 'Vivo'},
'55279961':{'en': 'Vivo'},
'55279962':{'en': 'Vivo'},
'55279963':{'en': 'Vivo'},
'55279964':{'en': 'Vivo'},
'552799650':{'en': 'Vivo'},
'552799651':{'en': 'Vivo'},
'552799652':{'en': 'Vivo'},
'552799653':{'en': 'Vivo'},
'5527997':{'en': 'Vivo'},
'5527998':{'en': 'Vivo'},
'5527999':{'en': 'Vivo'},
'552898111':{'en': 'TIM'},
'552898112':{'en': 'TIM'},
'552898113':{'en': 'TIM'},
'552898114':{'en': 'TIM'},
'552898115':{'en': 'TIM'},
'552898116':{'en': 'TIM'},
'552898117':{'en': 'TIM'},
'552898118':{'en': 'TIM'},
'552898119':{'en': 'TIM'},
'5528985':{'en': 'Oi'},
'5528986':{'en': 'Oi'},
'5528987':{'en': 'Oi'},
'5528988':{'en': 'Oi'},
'5528989':{'en': 'Oi'},
'552899210':{'en': 'Claro BR'},
'552899222':{'en': 'Claro BR'},
'552899251':{'en': 'Claro BR'},
'552899252':{'en': 'Claro BR'},
'552899253':{'en': 'Claro BR'},
'552899254':{'en': 'Claro BR'},
'552899255':{'en': 'Claro BR'},
'552899256':{'en': 'Claro BR'},
'552899257':{'en': 'Claro BR'},
'552899258':{'en': 'Claro BR'},
'552899271':{'en': 'Claro BR'},
'552899272':{'en': 'Claro BR'},
'552899273':{'en': 'Claro BR'},
'552899274':{'en': 'Claro BR'},
'552899275':{'en': 'Claro BR'},
'552899276':{'en': 'Claro BR'},
'552899277':{'en': 'Claro BR'},
'552899278':{'en': 'Claro BR'},
'552899279':{'en': 'Claro BR'},
'552899291':{'en': 'Claro BR'},
'552899298':{'en': 'Claro BR'},
'552899881':{'en': 'Vivo'},
'552899882':{'en': 'Vivo'},
'552899883':{'en': 'Vivo'},
'552899884':{'en': 'Vivo'},
'552899885':{'en': 'Vivo'},
'552899886':{'en': 'Vivo'},
'552899901':{'en': 'Vivo'},
'552899902':{'en': 'Vivo'},
'552899903':{'en': 'Vivo'},
'552899904':{'en': 'Vivo'},
'552899905':{'en': 'Vivo'},
'552899915':{'en': 'Vivo'},
'552899916':{'en': 'Vivo'},
'552899917':{'en': 'Vivo'},
'552899918':{'en': 'Vivo'},
'552899919':{'en': 'Vivo'},
'552899921':{'en': 'Vivo'},
'552899922':{'en': 'Vivo'},
'552899923':{'en': 'Vivo'},
'552899924':{'en': 'Vivo'},
'552899925':{'en': 'Vivo'},
'552899926':{'en': 'Vivo'},
'552899935':{'en': 'Vivo'},
'552899938':{'en': 'Vivo'},
'552899939':{'en': 'Vivo'},
'552899945':{'en': 'Vivo'},
'552899946':{'en': 'Vivo'},
'552899951':{'en': 'Vivo'},
'552899952':{'en': 'Vivo'},
'552899953':{'en': 'Vivo'},
'552899954':{'en': 'Vivo'},
'552899955':{'en': 'Vivo'},
'552899956':{'en': 'Vivo'},
'552899957':{'en': 'Vivo'},
'552899958':{'en': 'Vivo'},
'552899959':{'en': 'Vivo'},
'552899961':{'en': 'Vivo'},
'552899962':{'en': 'Vivo'},
'552899963':{'en': 'Vivo'},
'552899964':{'en': 'Vivo'},
'552899965':{'en': 'Vivo'},
'552899966':{'en': 'Vivo'},
'552899967':{'en': 'Vivo'},
'552899968':{'en': 'Vivo'},
'552899969':{'en': 'Vivo'},
'552899971':{'en': 'Vivo'},
'552899972':{'en': 'Vivo'},
'552899973':{'en': 'Vivo'},
'552899974':{'en': 'Vivo'},
'552899975':{'en': 'Vivo'},
'552899976':{'en': 'Vivo'},
'552899977':{'en': 'Vivo'},
'552899978':{'en': 'Vivo'},
'552899979':{'en': 'Vivo'},
'552899981':{'en': 'Vivo'},
'552899982':{'en': 'Vivo'},
'552899983':{'en': 'Vivo'},
'552899984':{'en': 'Vivo'},
'552899985':{'en': 'Vivo'},
'552899986':{'en': 'Vivo'},
'552899987':{'en': 'Vivo'},
'552899988':{'en': 'Vivo'},
'552899989':{'en': 'Vivo'},
'552899991':{'en': 'Vivo'},
'552899992':{'en': 'Vivo'},
'552899993':{'en': 'Vivo'},
'552899994':{'en': 'Vivo'},
'552899995':{'en': 'Vivo'},
'552899996':{'en': 'Vivo'},
'552899997':{'en': 'Vivo'},
'552899998':{'en': 'Vivo'},
'55319820':{'en': 'Claro BR'},
'55319821':{'en': 'Claro BR'},
'55319822':{'en': 'Claro BR'},
'55319823':{'en': 'Claro BR'},
'553198240':{'en': 'Claro BR'},
'553198241':{'en': 'Claro BR'},
'553198242':{'en': 'Claro BR'},
'553198243':{'en': 'Claro BR'},
'553198244':{'en': 'Claro BR'},
'553198245':{'en': 'Claro BR'},
'5531983':{'en': 'Claro BR'},
'5531984':{'en': 'Claro BR'},
'5531985':{'en': 'Oi'},
'5531986':{'en': 'Oi'},
'5531987':{'en': 'Oi'},
'5531988':{'en': 'Oi'},
'5531989':{'en': 'Oi'},
'553199101':{'en': 'TIM'},
'553199102':{'en': 'TIM'},
'553199103':{'en': 'TIM'},
'553199104':{'en': 'TIM'},
'553199105':{'en': 'TIM'},
'553199106':{'en': 'TIM'},
'553199107':{'en': 'TIM'},
'553199108':{'en': 'TIM'},
'553199109':{'en': 'TIM'},
'55319911':{'en': 'TIM'},
'55319912':{'en': 'TIM'},
'55319913':{'en': 'TIM'},
'55319914':{'en': 'TIM'},
'55319915':{'en': 'TIM'},
'553199161':{'en': 'TIM'},
'553199162':{'en': 'TIM'},
'553199163':{'en': 'TIM'},
'553199164':{'en': 'TIM'},
'553199165':{'en': 'TIM'},
'553199166':{'en': 'TIM'},
'553199167':{'en': 'TIM'},
'553199168':{'en': 'TIM'},
'553199169':{'en': 'TIM'},
'553199171':{'en': 'TIM'},
'553199172':{'en': 'TIM'},
'553199173':{'en': 'TIM'},
'553199174':{'en': 'TIM'},
'553199175':{'en': 'TIM'},
'553199176':{'en': 'TIM'},
'553199177':{'en': 'TIM'},
'553199178':{'en': 'TIM'},
'553199179':{'en': 'TIM'},
'553199181':{'en': 'TIM'},
'553199182':{'en': 'TIM'},
'553199183':{'en': 'TIM'},
'553199184':{'en': 'TIM'},
'553199185':{'en': 'TIM'},
'553199186':{'en': 'TIM'},
'553199187':{'en': 'TIM'},
'553199188':{'en': 'TIM'},
'553199189':{'en': 'TIM'},
'553199191':{'en': 'TIM'},
'553199192':{'en': 'TIM'},
'553199193':{'en': 'TIM'},
'553199194':{'en': 'TIM'},
'553199195':{'en': 'TIM'},
'553199196':{'en': 'TIM'},
'553199197':{'en': 'TIM'},
'553199198':{'en': 'TIM'},
'553199199':{'en': 'TIM'},
'5531992':{'en': 'TIM'},
'5531993':{'en': 'TIM'},
'553199401':{'en': 'TIM'},
'553199402':{'en': 'TIM'},
'553199403':{'en': 'TIM'},
'553199404':{'en': 'TIM'},
'553199405':{'en': 'TIM'},
'553199406':{'en': 'TIM'},
'553199407':{'en': 'TIM'},
'553199408':{'en': 'TIM'},
'553199409':{'en': 'TIM'},
'553199411':{'en': 'TIM'},
'553199412':{'en': 'TIM'},
'553199413':{'en': 'TIM'},
'553199414':{'en': 'TIM'},
'553199415':{'en': 'TIM'},
'553199416':{'en': 'TIM'},
'553199601':{'en': 'Telemig Celular'},
'553199602':{'en': 'Telemig Celular'},
'553199603':{'en': 'Telemig Celular'},
'553199604':{'en': 'Telemig Celular'},
'553199605':{'en': 'Telemig Celular'},
'553199606':{'en': 'Telemig Celular'},
'553199607':{'en': 'Telemig Celular'},
'553199608':{'en': 'Telemig Celular'},
'553199609':{'en': 'Telemig Celular'},
'553199611':{'en': 'Telemig Celular'},
'553199612':{'en': 'Telemig Celular'},
'553199613':{'en': 'Telemig Celular'},
'553199614':{'en': 'Telemig Celular'},
'553199615':{'en': 'Telemig Celular'},
'553199616':{'en': 'Telemig Celular'},
'553199617':{'en': 'Telemig Celular'},
'553199618':{'en': 'Telemig Celular'},
'553199619':{'en': 'Telemig Celular'},
'553199621':{'en': 'Telemig Celular'},
'553199622':{'en': 'Telemig Celular'},
'553199624':{'en': 'Telemig Celular'},
'553199625':{'en': 'Telemig Celular'},
'553199626':{'en': 'Telemig Celular'},
'553199627':{'en': 'Telemig Celular'},
'553199628':{'en': 'Telemig Celular'},
'553199629':{'en': 'Telemig Celular'},
'553199631':{'en': 'Telemig Celular'},
'553199632':{'en': 'Telemig Celular'},
'553199633':{'en': 'Telemig Celular'},
'553199634':{'en': 'Telemig Celular'},
'553199635':{'en': 'Telemig Celular'},
'553199636':{'en': 'Telemig Celular'},
'553199637':{'en': 'Telemig Celular'},
'553199638':{'en': 'Telemig Celular'},
'553199639':{'en': 'Telemig Celular'},
'553199641':{'en': 'Telemig Celular'},
'553199642':{'en': 'Telemig Celular'},
'553199643':{'en': 'Telemig Celular'},
'553199644':{'en': 'Telemig Celular'},
'553199645':{'en': 'Telemig Celular'},
'553199646':{'en': 'Telemig Celular'},
'553199647':{'en': 'Telemig Celular'},
'553199648':{'en': 'Telemig Celular'},
'553199649':{'en': 'Telemig Celular'},
'553199651':{'en': 'Telemig Celular'},
'553199652':{'en': 'Telemig Celular'},
'553199653':{'en': 'Telemig Celular'},
'553199654':{'en': 'Telemig Celular'},
'553199655':{'en': 'Telemig Celular'},
'553199656':{'en': 'Telemig Celular'},
'553199657':{'en': 'Telemig Celular'},
'553199658':{'en': 'Telemig Celular'},
'553199659':{'en': 'Telemig Celular'},
'553199661':{'en': 'Telemig Celular'},
'553199662':{'en': 'Telemig Celular'},
'553199663':{'en': 'Telemig Celular'},
'553199664':{'en': 'Telemig Celular'},
'553199665':{'en': 'Telemig Celular'},
'553199666':{'en': 'Telemig Celular'},
'553199667':{'en': 'Telemig Celular'},
'553199668':{'en': 'Telemig Celular'},
'553199669':{'en': 'Telemig Celular'},
'553199671':{'en': 'Telemig Celular'},
'553199672':{'en': 'Telemig Celular'},
'553199673':{'en': 'Telemig Celular'},
'553199674':{'en': 'Telemig Celular'},
'553199675':{'en': 'Telemig Celular'},
'553199676':{'en': 'Telemig Celular'},
'553199677':{'en': 'Telemig Celular'},
'553199678':{'en': 'Telemig Celular'},
'553199679':{'en': 'Telemig Celular'},
'553199681':{'en': 'Telemig Celular'},
'553199682':{'en': 'Telemig Celular'},
'553199683':{'en': 'Telemig Celular'},
'553199684':{'en': 'Telemig Celular'},
'553199685':{'en': 'Telemig Celular'},
'553199686':{'en': 'Telemig Celular'},
'553199687':{'en': 'Telemig Celular'},
'553199688':{'en': 'Telemig Celular'},
'553199689':{'en': 'Telemig Celular'},
'553199691':{'en': 'Telemig Celular'},
'553199692':{'en': 'Telemig Celular'},
'553199693':{'en': 'Telemig Celular'},
'553199694':{'en': 'Telemig Celular'},
'553199695':{'en': 'Telemig Celular'},
'553199696':{'en': 'Telemig Celular'},
'553199697':{'en': 'Telemig Celular'},
'553199698':{'en': 'Telemig Celular'},
'553199699':{'en': 'Telemig Celular'},
'553199701':{'en': 'Telemig Celular'},
'553199702':{'en': 'Telemig Celular'},
'553199703':{'en': 'Telemig Celular'},
'553199704':{'en': 'Telemig Celular'},
'553199705':{'en': 'Telemig Celular'},
'553199706':{'en': 'Telemig Celular'},
'553199707':{'en': 'Telemig Celular'},
'553199708':{'en': 'Telemig Celular'},
'553199709':{'en': 'Telemig Celular'},
'553199711':{'en': 'Telemig Celular'},
'553199712':{'en': 'Telemig Celular'},
'553199713':{'en': 'Telemig Celular'},
'553199714':{'en': 'Telemig Celular'},
'553199715':{'en': 'Telemig Celular'},
'553199717':{'en': 'Telemig Celular'},
'553199718':{'en': 'Telemig Celular'},
'553199719':{'en': 'Telemig Celular'},
'553199721':{'en': 'Telemig Celular'},
'553199722':{'en': 'Telemig Celular'},
'553199723':{'en': 'Telemig Celular'},
'553199724':{'en': 'Telemig Celular'},
'553199725':{'en': 'Telemig Celular'},
'553199726':{'en': 'Telemig Celular'},
'553199728':{'en': 'Telemig Celular'},
'553199729':{'en': 'Telemig Celular'},
'553199731':{'en': 'Telemig Celular'},
'553199732':{'en': 'Telemig Celular'},
'553199733':{'en': 'Telemig Celular'},
'553199734':{'en': 'Telemig Celular'},
'553199735':{'en': 'Telemig Celular'},
'553199736':{'en': 'Telemig Celular'},
'553199737':{'en': 'Telemig Celular'},
'553199738':{'en': 'Telemig Celular'},
'553199739':{'en': 'Telemig Celular'},
'553199741':{'en': 'Telemig Celular'},
'553199742':{'en': 'Telemig Celular'},
'553199743':{'en': 'Telemig Celular'},
'553199744':{'en': 'Telemig Celular'},
'553199745':{'en': 'Telemig Celular'},
'553199746':{'en': 'Telemig Celular'},
'553199747':{'en': 'Telemig Celular'},
'553199748':{'en': 'Telemig Celular'},
'553199749':{'en': 'Telemig Celular'},
'553199751':{'en': 'Telemig Celular'},
'553199752':{'en': 'Telemig Celular'},
'553199753':{'en': 'Telemig Celular'},
'553199755':{'en': 'Telemig Celular'},
'553199756':{'en': 'Telemig Celular'},
'553199757':{'en': 'Telemig Celular'},
'553199758':{'en': 'Telemig Celular'},
'553199759':{'en': 'Telemig Celular'},
'553199761':{'en': 'Telemig Celular'},
'553199762':{'en': 'Telemig Celular'},
'553199763':{'en': 'Telemig Celular'},
'553199764':{'en': 'Telemig Celular'},
'553199765':{'en': 'Telemig Celular'},
'553199766':{'en': 'Telemig Celular'},
'553199767':{'en': 'Telemig Celular'},
'553199768':{'en': 'Telemig Celular'},
'553199769':{'en': 'Telemig Celular'},
'553199771':{'en': 'Telemig Celular'},
'553199772':{'en': 'Telemig Celular'},
'553199773':{'en': 'Telemig Celular'},
'553199774':{'en': 'Telemig Celular'},
'553199775':{'en': 'Telemig Celular'},
'553199776':{'en': 'Telemig Celular'},
'553199777':{'en': 'Telemig Celular'},
'553199778':{'en': 'Telemig Celular'},
'553199779':{'en': 'Telemig Celular'},
'553199781':{'en': 'Telemig Celular'},
'553199782':{'en': 'Telemig Celular'},
'553199783':{'en': 'Telemig Celular'},
'553199784':{'en': 'Telemig Celular'},
'553199785':{'en': 'Telemig Celular'},
'553199786':{'en': 'Telemig Celular'},
'553199787':{'en': 'Telemig Celular'},
'553199788':{'en': 'Telemig Celular'},
'553199789':{'en': 'Telemig Celular'},
'553199791':{'en': 'Telemig Celular'},
'553199792':{'en': 'Telemig Celular'},
'553199793':{'en': 'Telemig Celular'},
'553199794':{'en': 'Telemig Celular'},
'553199795':{'en': 'Telemig Celular'},
'553199796':{'en': 'Telemig Celular'},
'553199797':{'en': 'Telemig Celular'},
'553199798':{'en': 'Telemig Celular'},
'553199799':{'en': 'Telemig Celular'},
'5531998':{'en': 'Telemig Celular'},
'553199800':{'en': 'TIM'},
'553199810':{'en': 'TIM'},
'553199820':{'en': 'TIM'},
'553199830':{'en': 'TIM'},
'553199840':{'en': 'TIM'},
'553199850':{'en': 'TIM'},
'553199860':{'en': 'TIM'},
'553199870':{'en': 'TIM'},
'553199880':{'en': 'TIM'},
'553199890':{'en': 'TIM'},
'553199901':{'en': 'Telemig Celular'},
'553199902':{'en': 'Telemig Celular'},
'553199903':{'en': 'Telemig Celular'},
'553199904':{'en': 'Telemig Celular'},
'553199905':{'en': 'Telemig Celular'},
'553199906':{'en': 'Telemig Celular'},
'553199907':{'en': 'Telemig Celular'},
'553199908':{'en': 'Telemig Celular'},
'553199909':{'en': 'Telemig Celular'},
'553199911':{'en': 'Telemig Celular'},
'553199912':{'en': 'Telemig Celular'},
'553199913':{'en': 'Telemig Celular'},
'553199914':{'en': 'Telemig Celular'},
'553199915':{'en': 'Telemig Celular'},
'553199916':{'en': 'Telemig Celular'},
'553199917':{'en': 'Telemig Celular'},
'553199918':{'en': 'Telemig Celular'},
'553199919':{'en': 'Telemig Celular'},
'553199921':{'en': 'Telemig Celular'},
'553199922':{'en': 'Telemig Celular'},
'553199923':{'en': 'Telemig Celular'},
'553199924':{'en': 'Telemig Celular'},
'553199925':{'en': 'Telemig Celular'},
'553199926':{'en': 'Telemig Celular'},
'553199927':{'en': 'Telemig Celular'},
'553199928':{'en': 'Telemig Celular'},
'553199929':{'en': 'Telemig Celular'},
'553199931':{'en': 'Telemig Celular'},
'553199932':{'en': 'Telemig Celular'},
'553199933':{'en': 'Telemig Celular'},
'553199934':{'en': 'Telemig Celular'},
'553199935':{'en': 'Telemig Celular'},
'553199936':{'en': 'Telemig Celular'},
'553199937':{'en': 'Telemig Celular'},
'553199938':{'en': 'Telemig Celular'},
'553199939':{'en': 'Telemig Celular'},
'553199941':{'en': 'Telemig Celular'},
'553199942':{'en': 'Telemig Celular'},
'553199943':{'en': 'Telemig Celular'},
'553199944':{'en': 'Telemig Celular'},
'553199945':{'en': 'Telemig Celular'},
'553199946':{'en': 'Telemig Celular'},
'553199947':{'en': 'Telemig Celular'},
'553199948':{'en': 'Telemig Celular'},
'553199949':{'en': 'Telemig Celular'},
'55319995':{'en': 'Telemig Celular'},
'55319996':{'en': 'Telemig Celular'},
'55319997':{'en': 'Telemig Celular'},
'55319998':{'en': 'Telemig Celular'},
'55319999':{'en': 'Telemig Celular'},
'55329840':{'en': 'Claro BR'},
'55329841':{'en': 'Claro BR'},
'55329842':{'en': 'Claro BR'},
'55329843':{'en': 'Claro BR'},
'55329844':{'en': 'Claro BR'},
'55329845':{'en': 'Claro BR'},
'55329846':{'en': 'Claro BR'},
'55329847':{'en': 'Claro BR'},
'553298480':{'en': 'Claro BR'},
'553298481':{'en': 'Claro BR'},
'553298482':{'en': 'Claro BR'},
'553298483':{'en': 'Claro BR'},
'553298484':{'en': 'Claro BR'},
'553298485':{'en': 'Claro BR'},
'5532985':{'en': 'Oi'},
'5532986':{'en': 'Oi'},
'5532987':{'en': 'Oi'},
'5532988':{'en': 'Oi'},
'5532989':{'en': 'Oi'},
'553299101':{'en': 'TIM'},
'553299102':{'en': 'TIM'},
'553299103':{'en': 'TIM'},
'553299104':{'en': 'TIM'},
'553299105':{'en': 'TIM'},
'553299106':{'en': 'TIM'},
'553299107':{'en': 'TIM'},
'553299108':{'en': 'TIM'},
'553299109':{'en': 'TIM'},
'553299111':{'en': 'TIM'},
'553299112':{'en': 'TIM'},
'553299113':{'en': 'TIM'},
'553299114':{'en': 'TIM'},
'553299115':{'en': 'TIM'},
'553299116':{'en': 'TIM'},
'553299117':{'en': 'TIM'},
'553299118':{'en': 'TIM'},
'553299119':{'en': 'TIM'},
'553299121':{'en': 'TIM'},
'553299122':{'en': 'TIM'},
'553299123':{'en': 'TIM'},
'553299124':{'en': 'TIM'},
'553299125':{'en': 'TIM'},
'553299126':{'en': 'TIM'},
'553299127':{'en': 'TIM'},
'553299128':{'en': 'TIM'},
'553299129':{'en': 'TIM'},
'553299131':{'en': 'TIM'},
'553299132':{'en': 'TIM'},
'553299133':{'en': 'TIM'},
'553299134':{'en': 'TIM'},
'553299135':{'en': 'TIM'},
'553299136':{'en': 'TIM'},
'553299137':{'en': 'TIM'},
'553299138':{'en': 'TIM'},
'553299139':{'en': 'TIM'},
'553299141':{'en': 'TIM'},
'553299142':{'en': 'TIM'},
'553299143':{'en': 'TIM'},
'553299144':{'en': 'TIM'},
'553299145':{'en': 'TIM'},
'553299146':{'en': 'TIM'},
'553299193':{'en': 'TIM'},
'553299194':{'en': 'TIM'},
'553299195':{'en': 'TIM'},
'553299197':{'en': 'TIM'},
'553299198':{'en': 'TIM'},
'553299199':{'en': 'TIM'},
'553299901':{'en': 'Telemig Celular'},
'553299902':{'en': 'Telemig Celular'},
'553299903':{'en': 'Telemig Celular'},
'553299904':{'en': 'Telemig Celular'},
'553299905':{'en': 'Telemig Celular'},
'553299906':{'en': 'Telemig Celular'},
'553299907':{'en': 'Telemig Celular'},
'553299908':{'en': 'Telemig Celular'},
'553299909':{'en': 'Telemig Celular'},
'553299911':{'en': 'Telemig Celular'},
'553299912':{'en': 'Telemig Celular'},
'553299913':{'en': 'Telemig Celular'},
'553299914':{'en': 'Telemig Celular'},
'553299917':{'en': 'Telemig Celular'},
'553299918':{'en': 'Telemig Celular'},
'553299919':{'en': 'Telemig Celular'},
'553299921':{'en': 'Telemig Celular'},
'553299922':{'en': 'Telemig Celular'},
'553299923':{'en': 'Telemig Celular'},
'553299924':{'en': 'Telemig Celular'},
'553299925':{'en': 'Telemig Celular'},
'553299931':{'en': 'Telemig Celular'},
'553299932':{'en': 'Telemig Celular'},
'553299933':{'en': 'Telemig Celular'},
'553299934':{'en': 'Telemig Celular'},
'553299935':{'en': 'Telemig Celular'},
'553299936':{'en': 'Telemig Celular'},
'553299937':{'en': 'Telemig Celular'},
'553299938':{'en': 'Telemig Celular'},
'553299939':{'en': 'Telemig Celular'},
'553299941':{'en': 'Telemig Celular'},
'553299942':{'en': 'Telemig Celular'},
'553299943':{'en': 'Telemig Celular'},
'553299944':{'en': 'Telemig Celular'},
'553299945':{'en': 'Telemig Celular'},
'553299946':{'en': 'Telemig Celular'},
'553299947':{'en': 'Telemig Celular'},
'553299948':{'en': 'Telemig Celular'},
'553299949':{'en': 'Telemig Celular'},
'553299951':{'en': 'Telemig Celular'},
'553299952':{'en': 'Telemig Celular'},
'553299953':{'en': 'Telemig Celular'},
'553299954':{'en': 'Telemig Celular'},
'553299955':{'en': 'Telemig Celular'},
'553299956':{'en': 'Telemig Celular'},
'553299957':{'en': 'Telemig Celular'},
'553299958':{'en': 'Telemig Celular'},
'553299959':{'en': 'Telemig Celular'},
'55329996':{'en': 'Telemig Celular'},
'553299971':{'en': 'Telemig Celular'},
'553299972':{'en': 'Telemig Celular'},
'553299973':{'en': 'Telemig Celular'},
'553299974':{'en': 'Telemig Celular'},
'553299975':{'en': 'Telemig Celular'},
'553299976':{'en': 'Telemig Celular'},
'553299977':{'en': 'Telemig Celular'},
'553299979':{'en': 'Telemig Celular'},
'55329998':{'en': 'Telemig Celular'},
'553299991':{'en': 'Telemig Celular'},
'553299992':{'en': 'Telemig Celular'},
'553299993':{'en': 'Telemig Celular'},
'553299994':{'en': 'Telemig Celular'},
'553299995':{'en': 'Telemig Celular'},
'553299996':{'en': 'Telemig Celular'},
'553299997':{'en': 'Telemig Celular'},
'553299998':{'en': 'Telemig Celular'},
'553398401':{'en': 'Claro BR'},
'553398402':{'en': 'Claro BR'},
'553398403':{'en': 'Claro BR'},
'553398404':{'en': 'Claro BR'},
'553398405':{'en': 'Claro BR'},
'553398406':{'en': 'Claro BR'},
'553398407':{'en': 'Claro BR'},
'553398408':{'en': 'Claro BR'},
'553398409':{'en': 'Claro BR'},
'553398411':{'en': 'Claro BR'},
'553398412':{'en': 'Claro BR'},
'553398413':{'en': 'Claro BR'},
'553398414':{'en': 'Claro BR'},
'553398415':{'en': 'Claro BR'},
'553398416':{'en': 'Claro BR'},
'553398417':{'en': 'Claro BR'},
'553398418':{'en': 'Claro BR'},
'553398419':{'en': 'Claro BR'},
'553398421':{'en': 'Claro BR'},
'553398422':{'en': 'Claro BR'},
'553398423':{'en': 'Claro BR'},
'553398424':{'en': 'Claro BR'},
'553398425':{'en': 'Claro BR'},
'553398426':{'en': 'Claro BR'},
'553398427':{'en': 'Claro BR'},
'553398428':{'en': 'Claro BR'},
'553398429':{'en': 'Claro BR'},
'553398431':{'en': 'Claro BR'},
'553398432':{'en': 'Claro BR'},
'553398433':{'en': 'Claro BR'},
'553398434':{'en': 'Claro BR'},
'553398435':{'en': 'Claro BR'},
'553398436':{'en': 'Claro BR'},
'553398437':{'en': 'Claro BR'},
'553398438':{'en': 'Claro BR'},
'553398439':{'en': 'Claro BR'},
'553398441':{'en': 'Claro BR'},
'553398442':{'en': 'Claro BR'},
'553398443':{'en': 'Claro BR'},
'553398444':{'en': 'Claro BR'},
'553398445':{'en': 'Claro BR'},
'553398446':{'en': 'Claro BR'},
'553398447':{'en': 'Claro BR'},
'553398448':{'en': 'Claro BR'},
'553398449':{'en': 'Claro BR'},
'553398451':{'en': 'Claro BR'},
'553398452':{'en': 'Claro BR'},
'553398453':{'en': 'Claro BR'},
'553398454':{'en': 'Claro BR'},
'553398455':{'en': 'Claro BR'},
'553398456':{'en': 'Claro BR'},
'5533985':{'en': 'Oi'},
'5533986':{'en': 'Oi'},
'5533987':{'en': 'Oi'},
'5533988':{'en': 'Oi'},
'5533989':{'en': 'Oi'},
'553399101':{'en': 'TIM'},
'553399102':{'en': 'TIM'},
'553399103':{'en': 'TIM'},
'553399104':{'en': 'TIM'},
'553399105':{'en': 'TIM'},
'553399106':{'en': 'TIM'},
'553399107':{'en': 'TIM'},
'553399108':{'en': 'TIM'},
'553399109':{'en': 'TIM'},
'553399111':{'en': 'TIM'},
'553399112':{'en': 'TIM'},
'553399113':{'en': 'TIM'},
'553399114':{'en': 'TIM'},
'553399115':{'en': 'TIM'},
'553399116':{'en': 'TIM'},
'553399117':{'en': 'TIM'},
'553399118':{'en': 'TIM'},
'553399119':{'en': 'TIM'},
'553399121':{'en': 'TIM'},
'553399122':{'en': 'TIM'},
'553399123':{'en': 'TIM'},
'553399124':{'en': 'TIM'},
'553399125':{'en': 'TIM'},
'553399126':{'en': 'TIM'},
}
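# Illustrative sketch only: prefix tables like the one above are typically queried by
# longest-prefix match. The table's actual variable name is defined earlier in this
# file; `CARRIER_PREFIXES` below is a hypothetical stand-in used just for this example.
def _example_lookup_carrier(number_e164, prefix_map, lang='en'):
    """Return the carrier name for the longest matching prefix, or None."""
    digits = number_e164.lstrip('+')
    # Try the longest candidate prefix first and shrink one digit at a time.
    for end in range(len(digits), 0, -1):
        entry = prefix_map.get(digits[:end])
        if entry is not None:
            return entry.get(lang)
    return None
# e.g. _example_lookup_carrier('+5521998765432', CARRIER_PREFIXES) would resolve the
# '5521998' prefix above to 'Vivo' (assuming no longer matching prefix exists in the table).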
|
# pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name
# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements
"""Read invidual image files and perform augmentations."""
from __future__ import absolute_import, print_function
import os
import random
import logging
import numpy as np
try:
import cv2
except ImportError:
cv2 = None
from .base import numeric_types
from . import ndarray as nd
from . import _ndarray_internal as _internal
from ._ndarray_internal import _cvimresize as imresize
from ._ndarray_internal import _cvcopyMakeBorder as copyMakeBorder
from . import io
from . import recordio
def imdecode(buf, **kwargs):
"""Decode an image to an NDArray.
Note: `imdecode` uses OpenCV (not the CV2 Python library).
MXNet must have been built with OpenCV for `imdecode` to work.
Parameters
----------
buf : str/bytes or numpy.ndarray
Binary image data as string or numpy ndarray.
flag : int, optional, default=1
1 for three channel color output. 0 for grayscale output.
to_rgb : int, optional, default=1
1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).
out : NDArray, optional
Output buffer. Use `None` for automatic allocation.
Returns
-------
NDArray
An `NDArray` containing the image.
Example
-------
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 224x224x3 @cpu(0)>
Set `flag` parameter to 0 to get grayscale output
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image, flag=0)
>>> image
<NDArray 224x224x1 @cpu(0)>
Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image, to_rgb=0)
>>> image
<NDArray 224x224x3 @cpu(0)>
"""
if not isinstance(buf, nd.NDArray):
buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)
return _internal._cvimdecode(buf, **kwargs)
def scale_down(src_size, size):
"""Scales down crop size if it's larger than image size.
If width/height of the crop is larger than the width/height of the image,
sets the width/height to the width/height of the image.
Parameters
----------
src_size : tuple of int
Size of the image in (width, height) format.
size : tuple of int
Size of the crop in (width, height) format.
Returns
-------
tuple of int
A tuple containing the scaled crop size in (width, height) format.
Example
--------
>>> src_size = (640,480)
>>> size = (720,120)
>>> new_size = mx.img.scale_down(src_size, size)
>>> new_size
(640,106)
"""
w, h = size
sw, sh = src_size
if sh < h:
w, h = float(w * sh) / h, sh
if sw < w:
w, h = sw, float(h * sw) / w
return int(w), int(h)
def resize_short(src, size, interp=2):
"""Resizes shorter edge to size.
Note: `resize_short` uses OpenCV (not the CV2 Python library).
MXNet must have been built with OpenCV for `resize_short` to work.
Resizes the original image by setting the shorter edge to size
and setting the longer edge accordingly.
Resizing function is called from OpenCV.
Parameters
----------
src : NDArray
The original image.
size : int
The length to be set for the shorter edge.
interp : int, optional, default=2
Interpolation method used for resizing the image.
Default method is bicubic interpolation.
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
Returns
-------
NDArray
        An `NDArray` containing the resized image.
Example
-------
>>> with open("flower.jpeg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> size = 640
>>> new_image = mx.img.resize_short(image, size)
>>> new_image
    <NDArray 640x960x3 @cpu(0)>
"""
h, w, _ = src.shape
    if h > w:
        new_h, new_w = size * h // w, size
    else:
        new_h, new_w = size, size * w // h
return imresize(src, new_w, new_h, interp=interp)
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
"""Crop src at fixed location, and (optionally) resize it to size."""
out = nd.crop(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, int(src.shape[2])))
if size is not None and (w, h) != size:
out = imresize(out, *size, interp=interp)
return out
def random_crop(src, size, interp=2):
"""Randomly crop `src` with `size` (width, height).
Upsample result if `src` is smaller than `size`.
Parameters
----------
src: Source image `NDArray`
size: Size of the crop formatted as (width, height). If the `size` is larger
than the image, then the source image is upsampled to `size` and returned.
interp: Interpolation method to be used in case the size is larger (default: bicubic).
Uses OpenCV convention for the parameters. Nearest - 0, Bilinear - 1, Bicubic - 2,
Area - 3. See OpenCV imresize function for more details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
Tuple
A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
original image and (width, height) are the dimensions of the cropped image.
Example
-------
>>> im = mx.nd.array(cv2.imread("flower.jpg"))
>>> cropped_im, rect = mx.image.random_crop(im, (100, 100))
    >>> print(cropped_im)
    <NDArray 100x100x3 @cpu(0)>
    >>> print(rect)
(20, 21, 100, 100)
"""
h, w, _ = src.shape
new_w, new_h = scale_down((w, h), size)
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return out, (x0, y0, new_w, new_h)
def center_crop(src, size, interp=2):
"""Crops the image `src` to the given `size` by trimming on all four
sides and preserving the center of the image. Upsamples if `src` is smaller
than `size`.
.. note:: This requires MXNet to be compiled with USE_OPENCV.
Parameters
----------
src : NDArray
Binary source image data.
size : list or tuple of int
The desired output image size.
    interp : int, optional, default=2 (bicubic)
The type of interpolation that is done to the image.
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
        2: Bicubic interpolation over 4x4 pixel neighborhood (used by default).
        3: Area-based (resampling using pixel area relation). It may be a
        preferred method for image decimation, as it gives moire-free
        results. But when the image is zoomed, it is similar to the Nearest
        Neighbors method.
4: Lanczos interpolation over 8x8 pixel neighborhood.
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
Returns
-------
NDArray
The cropped image.
Tuple
(x, y, width, height) where x, y are the positions of the crop in the
original image and width, height the dimensions of the crop.
Example
-------
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.image.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))
>>> cropped_image
<NDArray 500x1000x3 @cpu(0)>
>>> x, y, width, height
(1241, 910, 1000, 500)
"""
h, w, _ = src.shape
new_w, new_h = scale_down((w, h), size)
x0 = int((w - new_w) / 2)
y0 = int((h - new_h) / 2)
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return out, (x0, y0, new_w, new_h)
def color_normalize(src, mean, std=None):
"""Normalize src with mean and std."""
src -= mean
if std is not None:
src /= std
return src
def random_size_crop(src, size, min_area, ratio, interp=2):
"""Randomly crop src with size. Randomize area and aspect ratio."""
h, w, _ = src.shape
new_ratio = random.uniform(*ratio)
if new_ratio * h > w:
max_area = w * int(w / new_ratio)
else:
max_area = h * int(h * new_ratio)
min_area *= h * w
if max_area < min_area:
return random_crop(src, size, interp)
new_area = random.uniform(min_area, max_area)
new_w = int(np.sqrt(new_area * new_ratio))
new_h = int(np.sqrt(new_area / new_ratio))
assert new_w <= w and new_h <= h
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return out, (x0, y0, new_w, new_h)
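# A minimal usage sketch for random_size_crop; the ImageNet-style min_area and aspect
# ratio values below are assumptions for illustration, not taken from this file.
def _example_random_size_crop(img):
    """Sketch: crop `img` to 224x224 with randomized area and aspect ratio."""
    out, rect = random_size_crop(img, (224, 224), min_area=0.08,
                                 ratio=(3.0 / 4.0, 4.0 / 3.0))
    return out, rect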
def ResizeAug(size, interp=2):
"""Make resize shorter edge to size augmenter."""
def aug(src):
"""Augmenter body"""
return [resize_short(src, size, interp)]
return aug
def RandomCropAug(size, interp=2):
"""Make random crop augmenter"""
def aug(src):
"""Augmenter body"""
return [random_crop(src, size, interp)[0]]
return aug
def RandomSizedCropAug(size, min_area, ratio, interp=2):
"""Make random crop with random resizing and random aspect ratio jitter augmenter."""
def aug(src):
"""Augmenter body"""
return [random_size_crop(src, size, min_area, ratio, interp)[0]]
return aug
def CenterCropAug(size, interp=2):
"""Make center crop augmenter."""
def aug(src):
"""Augmenter body"""
return [center_crop(src, size, interp)[0]]
return aug
def RandomOrderAug(ts):
"""Apply list of augmenters in random order"""
def aug(src):
"""Augmenter body"""
src = [src]
random.shuffle(ts)
for t in ts:
src = [j for i in src for j in t(i)]
return src
return aug
def ColorJitterAug(brightness, contrast, saturation):
"""Apply random brightness, contrast and saturation jitter in random order."""
ts = []
coef = nd.array([[[0.299, 0.587, 0.114]]])
if brightness > 0:
def baug(src):
"""Augmenter body"""
alpha = 1.0 + random.uniform(-brightness, brightness)
src *= alpha
return [src]
ts.append(baug)
if contrast > 0:
def caug(src):
"""Augmenter body"""
alpha = 1.0 + random.uniform(-contrast, contrast)
gray = src * coef
gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)
src *= alpha
src += gray
return [src]
ts.append(caug)
if saturation > 0:
def saug(src):
"""Augmenter body"""
alpha = 1.0 + random.uniform(-saturation, saturation)
gray = src * coef
gray = nd.sum(gray, axis=2, keepdims=True)
gray *= (1.0 - alpha)
src *= alpha
src += gray
return [src]
ts.append(saug)
return RandomOrderAug(ts)
def LightingAug(alphastd, eigval, eigvec):
"""Add PCA based noise."""
def aug(src):
"""Augmenter body"""
alpha = np.random.normal(0, alphastd, size=(3,))
rgb = np.dot(eigvec * alpha, eigval)
src += nd.array(rgb)
return [src]
return aug
def ColorNormalizeAug(mean, std):
"""Mean and std normalization."""
mean = nd.array(mean)
std = nd.array(std)
def aug(src):
"""Augmenter body"""
return [color_normalize(src, mean, std)]
return aug
def HorizontalFlipAug(p):
"""Random horizontal flipping."""
def aug(src):
"""Augmenter body"""
if random.random() < p:
src = nd.flip(src, axis=1)
return [src]
return aug
def CastAug():
"""Cast to float32"""
def aug(src):
"""Augmenter body"""
src = src.astype(np.float32)
return [src]
return aug
def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,
mean=None, std=None, brightness=0, contrast=0, saturation=0,
pca_noise=0, inter_method=2):
"""Creates an augmenter list."""
auglist = []
if resize > 0:
auglist.append(ResizeAug(resize, inter_method))
crop_size = (data_shape[2], data_shape[1])
if rand_resize:
assert rand_crop
auglist.append(RandomSizedCropAug(crop_size, 0.3, (3.0 / 4.0, 4.0 / 3.0), inter_method))
elif rand_crop:
auglist.append(RandomCropAug(crop_size, inter_method))
else:
auglist.append(CenterCropAug(crop_size, inter_method))
if rand_mirror:
auglist.append(HorizontalFlipAug(0.5))
auglist.append(CastAug())
if brightness or contrast or saturation:
auglist.append(ColorJitterAug(brightness, contrast, saturation))
if pca_noise > 0:
eigval = np.array([55.46, 4.794, 1.148])
eigvec = np.array([[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]])
auglist.append(LightingAug(pca_noise, eigval, eigvec))
if mean is True:
mean = np.array([123.68, 116.28, 103.53])
elif mean is not None:
assert isinstance(mean, np.ndarray) and mean.shape[0] in [1, 3]
if std is True:
std = np.array([58.395, 57.12, 57.375])
elif std is not None:
assert isinstance(std, np.ndarray) and std.shape[0] in [1, 3]
if mean is not None and std is not None:
auglist.append(ColorNormalizeAug(mean, std))
return auglist
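# A minimal usage sketch for CreateAugmenter (illustrative only; 'flower.jpg' is a
# placeholder path and decoding requires MXNet built with OpenCV). Each augmenter maps
# one HWC image NDArray to a list of images, so outputs are flattened at every step;
# CastAug inside the list handles the uint8 -> float32 conversion.
def _example_create_augmenter(filename='flower.jpg'):
    auglist = CreateAugmenter(data_shape=(3, 224, 224), resize=256,
                              rand_crop=True, rand_mirror=True, mean=True, std=True)
    with open(filename, 'rb') as fp:
        data = [imdecode(fp.read())]      # decoded HWC uint8 image
    for aug in auglist:
        data = [out for src in data for out in aug(src)]
    return data                           # list of augmented HWC float32 NDArrays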
class ImageIter(io.DataIter):
"""Image data iterator with a large number of augmentation choices.
This iterator supports reading from both .rec files and raw image files.
To load input images from .rec files, use `path_imgrec` parameter and to load from raw image
files, use `path_imglist` and `path_root` parameters.
To use data partition (for distributed training) or shuffling, specify `path_imgidx` parameter.
Parameters
----------
batch_size : int
Number of examples per batch.
data_shape : tuple
Data shape in (channels, height, width) format.
For now, only RGB image with 3 channels is supported.
label_width : int, optional
Number of labels per example. The default label width is 1.
path_imgrec : str
Path to image record file (.rec).
Created with tools/im2rec.py or bin/im2rec.
path_imglist : str
Path to image list (.lst).
Created with tools/im2rec.py or with custom script.
Format: Tab separated record of index, one or more labels and relative_path_from_root.
imglist: list
A list of images with the label(s).
Each item is a list [imagelabel: float or list of float, imgpath].
path_root : str
Root folder of image files.
path_imgidx : str
Path to image index file. Needed for partition and shuffling when using .rec source.
shuffle : bool
Whether to shuffle all images at the start of each iteration or not.
Can be slow for HDD.
part_index : int
Partition index.
num_parts : int
Total number of partitions.
data_name : str
Data name for provided symbols.
label_name : str
Label name for provided symbols.
kwargs : ...
More arguments for creating augmenter. See mx.image.CreateAugmenter.
"""
def __init__(self, batch_size, data_shape, label_width=1,
path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,
shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,
data_name='data', label_name='softmax_label', **kwargs):
super(ImageIter, self).__init__()
assert path_imgrec or path_imglist or (isinstance(imglist, list))
if path_imgrec:
print('loading recordio...')
if path_imgidx:
self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type
self.imgidx = list(self.imgrec.keys)
else:
self.imgrec = recordio.MXRecordIO(path_imgrec, 'r') # pylint: disable=redefined-variable-type
self.imgidx = None
else:
self.imgrec = None
if path_imglist:
print('loading image list...')
with open(path_imglist) as fin:
imglist = {}
imgkeys = []
for line in iter(fin.readline, ''):
line = line.strip().split('\t')
label = nd.array([float(i) for i in line[1:-1]])
key = int(line[0])
imglist[key] = (label, line[-1])
imgkeys.append(key)
self.imglist = imglist
elif isinstance(imglist, list):
print('loading image list...')
result = {}
imgkeys = []
index = 1
for img in imglist:
key = str(index) # pylint: disable=redefined-variable-type
index += 1
if isinstance(img[0], numeric_types):
label = nd.array([img[0]])
else:
label = nd.array(img[0])
result[key] = (label, img[1])
imgkeys.append(str(key))
self.imglist = result
else:
self.imglist = None
self.path_root = path_root
self.check_data_shape(data_shape)
self.provide_data = [(data_name, (batch_size,) + data_shape)]
if label_width > 1:
self.provide_label = [(label_name, (batch_size, label_width))]
else:
self.provide_label = [(label_name, (batch_size,))]
self.batch_size = batch_size
self.data_shape = data_shape
self.label_width = label_width
self.shuffle = shuffle
if self.imgrec is None:
self.seq = imgkeys
elif shuffle or num_parts > 1:
assert self.imgidx is not None
self.seq = self.imgidx
else:
self.seq = None
if num_parts > 1:
assert part_index < num_parts
N = len(self.seq)
            C = N // num_parts
self.seq = self.seq[part_index * C:(part_index + 1) * C]
if aug_list is None:
self.auglist = CreateAugmenter(data_shape, **kwargs)
else:
self.auglist = aug_list
self.cur = 0
self.reset()
def reset(self):
"""Resets the iterator to the beginning of the data."""
if self.shuffle:
random.shuffle(self.seq)
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
def next_sample(self):
"""Helper function for reading in next sample."""
if self.seq is not None:
if self.cur >= len(self.seq):
raise StopIteration
idx = self.seq[self.cur]
self.cur += 1
if self.imgrec is not None:
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
if self.imglist is None:
return header.label, img
else:
return self.imglist[idx][0], img
else:
label, fname = self.imglist[idx]
return label, self.read_image(fname)
else:
s = self.imgrec.read()
if s is None:
raise StopIteration
header, img = recordio.unpack(s)
return header.label, img
def next(self):
"""Returns the next batch of data."""
batch_size = self.batch_size
c, h, w = self.data_shape
batch_data = nd.empty((batch_size, c, h, w))
batch_label = nd.empty(self.provide_label[0][1])
i = 0
try:
while i < batch_size:
label, s = self.next_sample()
data = [self.imdecode(s)]
try:
self.check_valid_image(data)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
data = self.augmentation_transform(data)
for datum in data:
                    assert i < batch_size, 'Batch size must be a multiple of augmenter output length'
batch_data[i][:] = self.postprocess_data(datum)
batch_label[i][:] = label
i += 1
except StopIteration:
if not i:
raise StopIteration
return io.DataBatch([batch_data], [batch_label], batch_size - i)
def check_data_shape(self, data_shape):
"""Checks if the input data shape is valid"""
if not len(data_shape) == 3:
raise ValueError('data_shape should have length 3, with dimensions CxHxW')
if not data_shape[0] == 3:
raise ValueError('This iterator expects inputs to have 3 channels.')
def check_valid_image(self, data):
"""Checks if the input data is valid"""
if len(data[0].shape) == 0:
raise RuntimeError('Data shape is wrong')
def imdecode(self, s):
"""Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details."""
return imdecode(s)
def read_image(self, fname):
"""Reads an input image `fname` and returns the decoded raw bytes.
Example usage:
----------
        >>> dataIter.read_image('Face.jpg') # returns raw JPEG bytes.
'\xff\xd8\xff\xe0\x00...'
"""
with open(os.path.join(self.path_root, fname), 'rb') as fin:
img = fin.read()
return img
def augmentation_transform(self, data):
"""Transforms input data with specified augmentation."""
for aug in self.auglist:
data = [ret for src in data for ret in aug(src)]
return data
def postprocess_data(self, datum):
"""Final postprocessing step before image is loaded into the batch."""
return nd.transpose(datum, axes=(2, 0, 1))
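# Usage sketch for the iterator defined above (constructor name/signature are
# hypothetical here, following the MXNet ImageIter convention, since they appear
# earlier in this file):
#
#   it = ImageIter(batch_size=32, data_shape=(3, 224, 224),
#                  path_imgrec='train.rec', shuffle=True)
#   batch = it.next()                 # io.DataBatch with .data[0] and .label[0]
#   it.reset()                        # rewind before the next epoch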
|
"""
A small Test application to show how to use Flask-MQTT.
"""
import eventlet
import json
from flask import Flask, render_template
from flask_mqtt import Mqtt
from flask_socketio import SocketIO
from flask_bootstrap import Bootstrap
eventlet.monkey_patch()
app = Flask(__name__)
app.config['SECRET'] = 'my secret key'
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.config['MQTT_BROKER_URL'] = 'broker.hivemq.com'
app.config['MQTT_BROKER_PORT'] = 1883
app.config['MQTT_USERNAME'] = ''
app.config['MQTT_PASSWORD'] = ''
app.config['MQTT_KEEPALIVE'] = 5
app.config['MQTT_TLS_ENABLED'] = False
mqtt = Mqtt(app)
socketio = SocketIO(app)
bootstrap = Bootstrap(app)
@socketio.on('publish')
def handle_publish(json_str):
data = json.loads(json_str)
#mqtt.publish(data['topic'], data['message'])
@socketio.on('subscribe')
def handle_subscribe(json_str):
data = json.loads(json_str)
#mqtt.subscribe(data['topic'])
@mqtt.on_message()
def handle_mqtt_message(client, userdata, message):
data = dict(
topic=message.topic,
payload=message.payload.decode()
)
print('Server 2: Received message', data['payload'], 'from topic: ', data['topic'])
socketio.emit('mqtt_message', data=data)
@mqtt.on_log()
def handle_logging(client, userdata, level, buf):
print(level, buf)
if __name__ == '__main__':
#print('Server 2: subscribing to rmpbpp')
#socketio.emit('subscribe', None)
mqtt.subscribe('channel01')
socketio.run(app, host='0.0.0.0', port=5001, use_reloader=True, debug=True)
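# Example (assumed) Socket.IO payloads a connected client could send to the
# handlers above; the actual publish/subscribe calls are commented out in this
# test app:
#   emit('subscribe', json.dumps({'topic': 'channel01'}))
#   emit('publish', json.dumps({'topic': 'channel01', 'message': 'hello'}))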
|
#
# CRC32 hash implementation
# using polynomial 0x04c11db7
# _author: nagaganesh jaladanki
#
class crc32():
def __init__(self, data=None):
self.poly = 0x04c11db7
        if data is not None:
self.update(data)
    def update(self, bytestring):
        if not isinstance(bytestring, bytes):
            bytestring = str.encode(bytestring)
        bytestring = bytearray(bytestring)
        # original line truncated here; reconstruction (assumption): pad with 32
        # zero bits (the polynomial degree), then run a bit-by-bit non-reflected CRC
        padded = bytestring + bytearray(4)
        register = 0
        for byte in padded:
            for bit in range(7, -1, -1):
                register = (register << 1) | ((byte >> bit) & 1)
                if register & (1 << 32):
                    register ^= (1 << 32) | self.poly
        self.hash = register.to_bytes(4, 'big')
        return self
@property
def digest(self):
return bytes(self.hash)
@property
def hexdigest(self):
return self.hash.hex()
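# Minimal usage sketch (relies on the reconstructed update() above):
if __name__ == '__main__':
    checksum = crc32(b'123456789')
    print(checksum.hexdigest)   # 32-bit CRC as a hex string
    print(checksum.digest)      # the same value as 4 raw bytes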
|
import cv2
import time
import numpy as np
import sys
sys.path.append("../")
from train_models.MTCNN_config import config
from Detection.nms import py_nms
class MtcnnDetector(object):
def __init__(self,
detectors,
min_face_size=25,
stride=2,
threshold=[0.6, 0.7, 0.7],
scale_factor=0.79,
#scale_factor=0.709,#change
slide_window=False):
self.pnet_detector = detectors[0]
self.rnet_detector = detectors[1]
self.onet_detector = detectors[2]
self.min_face_size = min_face_size
self.stride = stride
self.thresh = threshold
self.scale_factor = scale_factor
self.slide_window = slide_window
def convert_to_square(self, bbox):
"""
convert bbox to square
Parameters:
----------
bbox: numpy array , shape n x 5
input bbox
Returns:
-------
square bbox
"""
square_bbox = bbox.copy()
h = bbox[:, 3] - bbox[:, 1] + 1
w = bbox[:, 2] - bbox[:, 0] + 1
max_side = np.maximum(h, w)
square_bbox[:, 0] = bbox[:, 0] + w * 0.5 - max_side * 0.5
square_bbox[:, 1] = bbox[:, 1] + h * 0.5 - max_side * 0.5
square_bbox[:, 2] = square_bbox[:, 0] + max_side - 1
square_bbox[:, 3] = square_bbox[:, 1] + max_side - 1
return square_bbox
def calibrate_box(self, bbox, reg):
"""
calibrate bboxes
Parameters:
----------
bbox: numpy array, shape n x 5
input bboxes
reg: numpy array, shape n x 4
bboxes adjustment
Returns:
-------
bboxes after refinement
"""
bbox_c = bbox.copy()
w = bbox[:, 2] - bbox[:, 0] + 1
w = np.expand_dims(w, 1)
h = bbox[:, 3] - bbox[:, 1] + 1
h = np.expand_dims(h, 1)
reg_m = np.hstack([w, h, w, h])
aug = reg_m * reg
bbox_c[:, 0:4] = bbox_c[:, 0:4] + aug
return bbox_c
def generate_bbox(self, cls_map, reg, scale, threshold):
"""
generate bbox from feature cls_map
Parameters:
----------
cls_map: numpy array , n x m
detect score for each position
reg: numpy array , n x m x 4
bbox
scale: float number
scale of this detection
threshold: float number
detect threshold
Returns:
-------
bbox array
"""
stride = 2
#stride = 4
cellsize = 12
#cellsize = 25
t_index = np.where(cls_map > threshold)
# find nothing
if t_index[0].size == 0:
return np.array([])
#offset
dx1, dy1, dx2, dy2 = [reg[t_index[0], t_index[1], i] for i in range(4)]
reg = np.array([dx1, dy1, dx2, dy2])
score = cls_map[t_index[0], t_index[1]]
boundingbox = np.vstack([np.round((stride * t_index[1]) / scale),
np.round((stride * t_index[0]) / scale),
np.round((stride * t_index[1] + cellsize) / scale),
np.round((stride * t_index[0] + cellsize) / scale),
score,
reg])
return boundingbox.T
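    # Worked example for generate_bbox(): with stride 2, cellsize 12 and scale 0.5,
    # a hit at feature-map position (row=10, col=7) maps back to the original image
    # as x1 = round(2*7/0.5) = 28, y1 = round(2*10/0.5) = 40,
    # x2 = round((2*7+12)/0.5) = 52, y2 = round((2*10+12)/0.5) = 64.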
#pre-process images
def processed_image(self, img, scale):
height, width, channels = img.shape
new_height = int(height * scale) # resized new height
new_width = int(width * scale) # resized new width
new_dim = (new_width, new_height)
img_resized = cv2.resize(img, new_dim, interpolation=cv2.INTER_LINEAR) # resized image
img_resized = (img_resized - 127.5) / 128
return img_resized
def pad(self, bboxes, w, h):
"""
        pad the bboxes, and also restrict their size
Parameters:
----------
bboxes: numpy array, n x 5
input bboxes
w: float number
width of the input image
h: float number
height of the input image
Returns :
------
dy, dx : numpy array, n x 1
start point of the bbox in target image
edy, edx : numpy array, n x 1
end point of the bbox in target image
y, x : numpy array, n x 1
start point of the bbox in original image
        ey, ex : numpy array, n x 1
end point of the bbox in original image
tmph, tmpw: numpy array, n x 1
height and width of the bbox
"""
tmpw, tmph = bboxes[:, 2] - bboxes[:, 0] + 1, bboxes[:, 3] - bboxes[:, 1] + 1
num_box = bboxes.shape[0]
dx, dy = np.zeros((num_box,)), np.zeros((num_box,))
edx, edy = tmpw.copy() - 1, tmph.copy() - 1
x, y, ex, ey = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
tmp_index = np.where(ex > w - 1)
edx[tmp_index] = tmpw[tmp_index] + w - 2 - ex[tmp_index]
ex[tmp_index] = w - 1
tmp_index = np.where(ey > h - 1)
edy[tmp_index] = tmph[tmp_index] + h - 2 - ey[tmp_index]
ey[tmp_index] = h - 1
tmp_index = np.where(x < 0)
dx[tmp_index] = 0 - x[tmp_index]
x[tmp_index] = 0
tmp_index = np.where(y < 0)
dy[tmp_index] = 0 - y[tmp_index]
y[tmp_index] = 0
return_list = [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph]
return_list = [item.astype(np.int32) for item in return_list]
return return_list
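    # Worked example for pad(): in a 100x100 image (w = h = 100), the box
    # (90, 90, 109, 109) has tmpw = tmph = 20 but sticks out past the right and
    # bottom borders, so ex = ey = 99 and edx = edy = 20 + 100 - 2 - 109 = 9:
    # only the top-left 10x10 region of the 20x20 crop is filled from the image,
    # the rest stays zero-padded.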
def detect_pnet(self, im):
"""Get face candidates through pnet
Parameters:
----------
im: numpy array
input image array
Returns:
-------
boxes: numpy array
detected boxes before calibration
boxes_c: numpy array
boxes after calibration
"""
h, w, c = im.shape
net_size = 12
current_scale = float(net_size) / self.min_face_size # find initial scale
# print("current_scale", net_size, self.min_face_size, current_scale)
im_resized = self.processed_image(im, current_scale)
current_height, current_width, _ = im_resized.shape
# fcn
all_boxes = list()
while min(current_height, current_width) > net_size:
#return the result predicted by pnet
#cls_cls_map : H*w*2
#reg: H*w*4
cls_cls_map, reg = self.pnet_detector.predict(im_resized)
#boxes: num*9(x1,y1,x2,y2,score,x1_offset,y1_offset,x2_offset,y2_offset)
boxes = self.generate_bbox(cls_cls_map[:, :,1], reg, current_scale, self.thresh[0])
current_scale *= self.scale_factor
im_resized = self.processed_image(im, current_scale)
current_height, current_width, _ = im_resized.shape
if boxes.size == 0:
continue
keep = py_nms(boxes[:, :5], 0.5, 'Union')
boxes = boxes[keep]
all_boxes.append(boxes)
if len(all_boxes) == 0:
return None, None, None
all_boxes = np.vstack(all_boxes)
# merge the detection from first stage
keep = py_nms(all_boxes[:, 0:5], 0.7, 'Union')
all_boxes = all_boxes[keep]
boxes = all_boxes[:, :5]
bbw = all_boxes[:, 2] - all_boxes[:, 0] + 1
bbh = all_boxes[:, 3] - all_boxes[:, 1] + 1
# refine the boxes
boxes_c = np.vstack([all_boxes[:, 0] + all_boxes[:, 5] * bbw,
all_boxes[:, 1] + all_boxes[:, 6] * bbh,
all_boxes[:, 2] + all_boxes[:, 7] * bbw,
all_boxes[:, 3] + all_boxes[:, 8] * bbh,
all_boxes[:, 4]])
boxes_c = boxes_c.T
return boxes, boxes_c, None
def detect_rnet(self, im, dets):
"""Get face candidates using rnet
Parameters:
----------
im: numpy array
input image array
dets: numpy array
detection results of pnet
Returns:
-------
boxes: numpy array
detected boxes before calibration
boxes_c: numpy array
boxes after calibration
"""
h, w, c = im.shape
dets = self.convert_to_square(dets)
dets[:, 0:4] = np.round(dets[:, 0:4])
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(dets, w, h)
num_boxes = dets.shape[0]
cropped_ims = np.zeros((num_boxes, 24, 24, 3), dtype=np.float32)
for i in range(num_boxes):
tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = im[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]
cropped_ims[i, :, :, :] = (cv2.resize(tmp, (24, 24))-127.5) / 128
#cls_scores : num_data*2
#reg: num_data*4
#landmark: num_data*10
cls_scores, reg, _ = self.rnet_detector.predict(cropped_ims)
cls_scores = cls_scores[:,1]
keep_inds = np.where(cls_scores > self.thresh[1])[0]
if len(keep_inds) > 0:
boxes = dets[keep_inds]
boxes[:, 4] = cls_scores[keep_inds]
reg = reg[keep_inds]
#landmark = landmark[keep_inds]
else:
return None, None, None
keep = py_nms(boxes, 0.6)
boxes = boxes[keep]
boxes_c = self.calibrate_box(boxes, reg[keep])
return boxes, boxes_c,None
def detect_onet(self, im, dets):
"""Get face candidates using onet
Parameters:
----------
im: numpy array
input image array
dets: numpy array
detection results of rnet
Returns:
-------
boxes: numpy array
detected boxes before calibration
boxes_c: numpy array
boxes after calibration
"""
h, w, c = im.shape
dets = self.convert_to_square(dets)
dets[:, 0:4] = np.round(dets[:, 0:4])
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(dets, w, h)
num_boxes = dets.shape[0]
cropped_ims = np.zeros((num_boxes, 48, 48, 3), dtype=np.float32)
for i in range(num_boxes):
tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
tmp[dy[i]:edy[i] + 1, dx[i]:edx[i] + 1, :] = im[y[i]:ey[i] + 1, x[i]:ex[i] + 1, :]
cropped_ims[i, :, :, :] = (cv2.resize(tmp, (48, 48))-127.5) / 128
cls_scores, reg,landmark = self.onet_detector.predict(cropped_ims)
#prob belongs to face
cls_scores = cls_scores[:,1]
keep_inds = np.where(cls_scores > self.thresh[2])[0]
if len(keep_inds) > 0:
#pickout filtered box
boxes = dets[keep_inds]
boxes[:, 4] = cls_scores[keep_inds]
reg = reg[keep_inds]
landmark = landmark[keep_inds]
else:
return None, None, None
#width
w = boxes[:,2] - boxes[:,0] + 1
#height
h = boxes[:,3] - boxes[:,1] + 1
landmark[:,0::2] = (np.tile(w,(5,1)) * landmark[:,0::2].T + np.tile(boxes[:,0],(5,1)) - 1).T
landmark[:,1::2] = (np.tile(h,(5,1)) * landmark[:,1::2].T + np.tile(boxes[:,1],(5,1)) - 1).T
boxes_c = self.calibrate_box(boxes, reg)
boxes = boxes[py_nms(boxes, 0.6, "Minimum")]
keep = py_nms(boxes_c, 0.6, "Minimum")
boxes_c = boxes_c[keep]
landmark = landmark[keep]
return boxes, boxes_c,landmark
#use for video
def detect(self, img):
"""Detect face over image
"""
boxes = None
t = time.time()
# pnet
t1 = 0
if self.pnet_detector:
boxes, boxes_c,_ = self.detect_pnet(img)
if boxes_c is None:
return np.array([]),np.array([])
t1 = time.time() - t
t = time.time()
# rnet
t2 = 0
if self.rnet_detector:
boxes, boxes_c,_ = self.detect_rnet(img, boxes_c)
if boxes_c is None:
return np.array([]),np.array([])
t2 = time.time() - t
t = time.time()
# onet
t3 = 0
if self.onet_detector:
boxes, boxes_c,landmark = self.detect_onet(img, boxes_c)
if boxes_c is None:
return np.array([]),np.array([])
t3 = time.time() - t
t = time.time()
        print("time cost {:.3f} pnet {:.3f} rnet {:.3f} onet {:.3f}".format(
            t1 + t2 + t3, t1, t2, t3))
return boxes_c,landmark
def detect_face(self, test_data):
all_boxes = []#save each image's bboxes
landmarks = []
batch_idx = 0
sum_time = 0
#test_data is iter_
for databatch in test_data:
#databatch(image returned)
if batch_idx % 100 == 0:
print("%d images done" % batch_idx)
im = databatch
# pnet
t1 = 0
if self.pnet_detector:
t = time.time()
#ignore landmark
boxes, boxes_c, landmark = self.detect_pnet(im)
t1 = time.time() - t
sum_time += t1
if boxes_c is None:
print("boxes_c is None...")
all_boxes.append(np.array([]))
                    # keep landmarks aligned with all_boxes (one entry per image)
landmarks.append(np.array([]))
batch_idx += 1
continue
# rnet
t2 = 0
if self.rnet_detector:
t = time.time()
#ignore landmark
boxes, boxes_c, landmark = self.detect_rnet(im, boxes_c)
t2 = time.time() - t
sum_time += t2
if boxes_c is None:
all_boxes.append(np.array([]))
landmarks.append(np.array([]))
batch_idx += 1
continue
# onet
t3 = 0
if self.onet_detector:
t = time.time()
boxes, boxes_c, landmark = self.detect_onet(im, boxes_c)
t3 = time.time() - t
sum_time += t3
if boxes_c is None:
all_boxes.append(np.array([]))
landmarks.append(np.array([]))
batch_idx += 1
continue
            print("time cost {:.3f} pnet {:.3f} rnet {:.3f} onet {:.3f}".format(
                sum_time, t1, t2, t3))
all_boxes.append(boxes_c)
landmarks.append(landmark)
batch_idx += 1
#num_of_data*9,num_of_data*10
return all_boxes,landmarks
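# Usage sketch (hypothetical model loading; the P/R/O-Net detector wrappers are
# built elsewhere in this project and only need to expose .predict(...)):
#
#   detectors = [pnet, rnet, onet]
#   mtcnn = MtcnnDetector(detectors, min_face_size=24, threshold=[0.6, 0.7, 0.7])
#   boxes_c, landmarks = mtcnn.detect(cv2.imread('face.jpg'))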
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from . import models, serializers
class ExploreUsers(APIView):
def get(self, request, format=None):
last_five = models.User.objects.all().order_by('-date_joined')[:5]
serializer = serializers.ListUserSerializer(last_five, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
class FollowUser(APIView):
def post(self, request, user_id, format=None):
user = request.user
print(user)
try:
user_to_follow = models.User.objects.get(id=user_id)
print(user_to_follow)
except models.User.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
user.following.add(user_to_follow)
user.save()
return Response(status=status.HTTP_200_OK)
class UnFollowUser(APIView):
def post(self, request, user_id, format=None):
user = request.user
try:
user_to_follow = models.User.objects.get(id=user_id)
except models.User.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
user.following.remove(user_to_follow)
user.save()
return Response(status=status.HTTP_200_OK)
class UserProfile(APIView):
def get(self, request, username, format=None):
try:
found_user = models.User.objects.get(username=username)
except models.User.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.UserProfileSerializer(found_user)
return Response(data=serializer.data, status=status.HTTP_200_OK)
class UserFollowers(APIView):
def get(self, request, username, format=None):
try:
found_user = models.User.objects.get(username=username)
except models.User.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
user_followers = found_user.followers.all()
serializer = serializers.ListUserSerializer( user_followers, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
class UserFollowing(APIView):
def get(self, request, username, format=None):
try:
found_user = models.User.objects.get(username=username)
except models.User.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
user_following = found_user.following.all()
serializer = serializers.ListUserSerializer(user_following, many=True)
return Response(data=serializer.data, status=status.HTTP_200_OK)
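# Hypothetical URL configuration for the views above (paths are assumptions,
# only the view names come from this module):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('explore/', views.ExploreUsers.as_view()),
#       path('<int:user_id>/follow/', views.FollowUser.as_view()),
#       path('<int:user_id>/unfollow/', views.UnFollowUser.as_view()),
#       path('<str:username>/followers/', views.UserFollowers.as_view()),
#       path('<str:username>/following/', views.UserFollowing.as_view()),
#       path('<str:username>/', views.UserProfile.as_view()),
#   ]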
|
from __future__ import annotations
from typing import List
x: List
def b(*, x: list[str]):
pass
|
"""Version details for maluforce"""
__title__ = "maluforce"
__description__ = "A basic Salesforce and Pandas interface"
__url__ = "https://github.com/rodrigoelemesmo/maluforce"
__version__ = "0.0.6"
__author__ = "Rodrigo Maluf"
__author_email__ = "rodrigo1793@gmail.com"
__license__ = "None"
__maintainer__ = "Rodrigo Maluf"
__maintainer_email__ = "rodrigo1793@gmail.com"
__keywords__ = "python salesforce salesforce.com pandas"
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import re
import pickle
import numpy as np
from collections import Counter
from functools import lru_cache
from . import constants
from .data_utils import tokenize
word_detector = re.compile(r'\w')
class VocabModel(object):
def __init__(self, data_set, config):
print('Building vocabs...')
(allWords, allEdgeTypes) = collect_vocabs(data_set)
print('Number of words: {}'.format(len(allWords)))
print('Number of edge types: {}'.format(len(allEdgeTypes)))
self.word_vocab = Vocab()
self.word_vocab.build_vocab(allWords, vocab_size=config['top_word_vocab'], min_freq=config['min_word_freq'])
if config.get('pretrained_word_embed_file', None):
self.word_vocab.load_embeddings(config['pretrained_word_embed_file'])
print('Using pretrained word embeddings')
else:
self.word_vocab.randomize_embeddings(config['word_embed_dim'])
print('Using randomized word embeddings')
print('word_vocab: {}'.format(self.word_vocab.embeddings.shape))
self.edge_vocab = Vocab()
self.edge_vocab.build_vocab(allEdgeTypes)
print('edge_vocab: {}'.format((self.edge_vocab.get_vocab_size())))
@classmethod
def build(cls, saved_vocab_file=None, data_set=None, config=None):
"""
        Loads a vocab model from `saved_vocab_file` if it exists; otherwise builds one from `data_set` and `config` and saves it there.
Args:
saved_vocab_file (str): path to the saved vocab file
data_set:
config:
Returns:
Vocabulary: loaded Vocabulary
"""
if os.path.exists(saved_vocab_file):
print('Loading pre-built vocab model stored in {}'.format(saved_vocab_file))
vocab_model = pickle.load(open(saved_vocab_file, 'rb'))
else:
vocab_model = VocabModel(data_set, config)
print('Saving vocab model to {}'.format(saved_vocab_file))
pickle.dump(vocab_model, open(saved_vocab_file, 'wb'))
return vocab_model
class Vocab(object):
def __init__(self):
self.PAD = 0
self.SOS = 1
self.EOS = 2
self.UNK = 3
self.pad_token = constants._PAD_TOKEN
self.sos_token = constants._SOS_TOKEN
self.eos_token = constants._EOS_TOKEN
self.unk_token = constants._UNK_TOKEN
self.reserved = [self.pad_token, self.sos_token, self.eos_token, self.unk_token]
self.index2word = self.reserved[:]
self.word2index = dict(zip(self.reserved, range(len(self.reserved))))
self.word2count = Counter()
self.embeddings = None
def build_vocab(self, vocab_counter, vocab_size=None, min_freq=1):
self.word2count = vocab_counter
self._add_words(vocab_counter.keys())
self._trim(vocab_size=vocab_size, min_freq=min_freq)
def _add_words(self, words):
for word in words:
if word not in self.word2index:
self.word2index[word] = len(self.index2word)
self.index2word.append(word)
assert len(self.word2index) == len(self.index2word)
def _trim(self, vocab_size: int=None, min_freq: int=1):
if min_freq <= 1 and (vocab_size is None or vocab_size >= len(self.word2index)):
return
ordered_words = sorted(((c, w) for (w, c) in self.word2count.items()), reverse=True)
if vocab_size:
ordered_words = ordered_words[:vocab_size]
self.index2word = self.reserved[:]
self.word2index = dict(zip(self.reserved, range(len(self.reserved))))
self.word2count = Counter()
for count, word in ordered_words:
if count < min_freq: break
if word not in self.word2index:
self.word2index[word] = len(self.index2word)
self.word2count[word] = count
self.index2word.append(word)
assert len(self.word2index) == len(self.index2word)
def load_embeddings(self, file_path, scale=0.08, dtype=np.float32):
hit_words = set()
vocab_size = len(self)
with open(file_path, 'rb') as f:
for line in f:
line = line.split()
word = line[0].decode('utf-8')
idx = self.word2index.get(word.lower(), None)
if idx is None or idx in hit_words:
continue
vec = np.array(line[1:], dtype=dtype)
if self.embeddings is None:
n_dims = len(vec)
self.embeddings = np.array(np.random.uniform(low=-scale, high=scale, size=(vocab_size, n_dims)), dtype=dtype)
self.embeddings[self.PAD] = np.zeros(n_dims)
self.embeddings[idx] = vec
hit_words.add(idx)
print('Pretrained word embeddings hit ratio: {}'.format(len(hit_words) / len(self.index2word)))
def randomize_embeddings(self, n_dims, scale=0.08):
vocab_size = self.get_vocab_size()
shape = (vocab_size, n_dims)
self.embeddings = np.array(np.random.uniform(low=-scale, high=scale, size=shape), dtype=np.float32)
self.embeddings[self.PAD] = np.zeros(n_dims)
def __getitem__(self, item):
if type(item) is int:
return self.index2word[item]
return self.word2index.get(item, self.UNK)
def __len__(self):
return len(self.index2word)
@lru_cache(maxsize=None)
def is_word(self, token_id: int) -> bool:
"""Return whether the token at `token_id` is a word; False for punctuations."""
if token_id < 4: return False
if token_id >= len(self): return True # OOV is assumed to be words
token_str = self.index2word[token_id]
if not word_detector.search(token_str) or token_str == '<P>':
return False
return True
def get_vocab_size(self):
return len(self.index2word)
def getIndex(self, word):
return self.word2index.get(word, self.UNK)
def getWord(self, idx):
return self.index2word[idx] if idx < len(self.index2word) else self.unk_token
def to_word_sequence(self, seq):
sentence = []
for idx in seq:
word = self.getWord(idx)
sentence.append(word)
return sentence
def to_index_sequence(self, sentence):
sentence = sentence.strip()
seq = []
for word in tokenize(sentence):
idx = self.getIndex(word)
seq.append(idx)
return seq
def to_index_sequence_for_list(self, words):
seq = []
for word in words:
idx = self.getIndex(word)
seq.append(idx)
return seq
def collect_vocabs(all_instances):
all_words = Counter()
all_edge_types = Counter()
for (sent1, sent2) in all_instances:
# for each in sent1.words:
# all_words.update(each)
for each in sent1.graph['g_features']:
all_words.update(each)
all_words.update(sent2.words)
# for node, value in sent1.graph['g_adj'].items():
# all_edge_types.update([each['edge'] for each in value])
return all_words, all_edge_types
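# Minimal usage sketch of Vocab on its own (VocabModel additionally needs a
# dataset and a config dict, as shown in VocabModel.__init__ above):
#
#   vocab = Vocab()
#   vocab.build_vocab(Counter({'graph': 3, 'neural': 2, 'network': 1}), vocab_size=10)
#   vocab.randomize_embeddings(n_dims=8)
#   vocab.to_index_sequence_for_list(['graph', 'unseen-word'])   # unseen word -> UNK id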
|
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="Zn3KXHlnNzLcEZ9pnrLwkkhwzlkzJp7bjgy6DqXLLqyGP59Ayn1J7ZrlpxcnVxWe",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1"]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ["debug_toolbar"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
|
import array
class bmp:
""" bmp data structure """
def __init__(self, w=1080, h=1920):
self.w = w
self.h = h
def calc_data_size (self):
if((self.w*3)%4 == 0):
self.dataSize = self.w * 3 * self.h
else:
self.dataSize = (((self.w * 3) // 4 + 1) * 4) * self.h
self.fileSize = self.dataSize + 54
def conv2byte(self, l, num, len):
tmp = num
for i in range(len):
l.append(tmp & 0x000000ff)
tmp >>= 8
def gen_bmp_header (self):
        self.calc_data_size()
self.bmp_header = [0x42, 0x4d]
self.conv2byte(self.bmp_header, self.fileSize, 4) #file size
self.conv2byte(self.bmp_header, 0, 2)
self.conv2byte(self.bmp_header, 0, 2)
self.conv2byte(self.bmp_header, 54, 4) #rgb data offset
self.conv2byte(self.bmp_header, 40, 4) #info block size
self.conv2byte(self.bmp_header, self.w, 4)
self.conv2byte(self.bmp_header, self.h, 4)
self.conv2byte(self.bmp_header, 1, 2)
self.conv2byte(self.bmp_header, 24, 2) #888
self.conv2byte(self.bmp_header, 0, 4) #no compression
self.conv2byte(self.bmp_header, self.dataSize, 4) #rgb data size
self.conv2byte(self.bmp_header, 0, 4)
self.conv2byte(self.bmp_header, 0, 4)
self.conv2byte(self.bmp_header, 0, 4)
self.conv2byte(self.bmp_header, 0, 4)
def print_bmp_header (self):
length = len(self.bmp_header)
for i in range(length):
print("{:0>2x}".format(self.bmp_header[i]), end=' ')
if i%16 == 15:
print('')
print('')
def paint_bgcolor(self, color=0xffffff):
## self.rgbData = []
## for r in range(self.h):
## self.rgbDataRow = []
## for c in range(self.w):
## self.rgbDataRow.append(color)
## self.rgbData.append(self.rgbDataRow)
        rgbDataRow = [color] * self.w
        self.rgbData = [rgbDataRow.copy() for i in range(self.h)]
def set_at(self,x, y, color):
self.rgbData[y][x] = color
def paint_line(self, x1, y1, x2, y2, color):
k = (y2 - y1) / (x2 - x1)
for x in range(x1, x2+1):
y = int(k * (x - x1) + y1)
self.rgbData[y][x] = color
def paint_rect(self, x1, y1, w, h, color):
for x in range(x1, x1+w):
for y in range(y1, y1+h):
self.rgbData[y][x] = color
def save_image(self, name="save.bmp"):
f = open(name, 'wb')
#write bmp header
f.write(array.array('B', self.bmp_header).tobytes())
#write rgb data
zeroBytes = self.dataSize // self.h - self.w * 3
for r in range(self.h):
l = []
for i in range(len(self.rgbData[r])):
p = self.rgbData[r][i]
l.append(p & 0x0000ff)
p >>= 8
l.append(p & 0x0000ff)
p >>= 8
l.append(p & 0x0000ff)
f.write(array.array('B', l).tobytes())
            # pad each row with zero bytes up to a multiple of 4, as BMP requires
            f.write(bytes(zeroBytes))
f.close()
sand = list([0] * 2003 for i in range(2003))
final = ''
image = bmp(2003, 2003)
image.gen_bmp_header()
image.print_bmp_header()
image.paint_bgcolor(0x000000)
stack = []
def update(i, j):
if i == 0 or i == 2002 or j == 0 or j == 2002:
sand[i][j] = 0
elif sand[i][j] >= 4:
q = sand[i][j] // 4
sand[i + 1][j] += q
sand[i - 1][j] += q
sand[i][j + 1] += q
sand[i][j - 1] += q
sand[i][j] %= 4
stack.append((i + 1, j))
stack.append((i - 1, j))
stack.append((i, j + 1))
stack.append((i, j - 1))
for i in range(40000):
sand[1001][1001] += 1
stack.append((1001, 1001))
while stack:
update(*stack.pop())
if i%100==0:print(i)
for i in range(1, 2003):
for j in range(1, 2003):
image.set_at(i, j, [0x0000ff, 0x00ffff, 0x00ff00, 0xff0000][sand[i][j]])
if i%100==0:print(i)
image.save_image('sand.bmp')
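# The bmp helper above can also be used on its own, e.g. (sketch):
#   img = bmp(64, 64)
#   img.gen_bmp_header()
#   img.paint_bgcolor(0xffffff)
#   img.paint_rect(8, 8, 16, 16, 0xff0000)
#   img.save_image('demo.bmp')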
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import copy
import logging
import os
import torch
from caffe2.proto import caffe2_pb2
from torch import nn
from detectron2.config import CfgNode as CN
from .caffe2_export import export_caffe2_detection_model
from .caffe2_export import export_onnx_model as export_onnx_model_impl
from .caffe2_export import run_and_save_graph
from .caffe2_inference import ProtobufDetectionModel
from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format
from .shared import get_pb_arg_vali, get_pb_arg_vals, save_graph
__all__ = ["add_export_config", "export_caffe2_model", "Caffe2Model", "export_onnx_model"]
def add_export_config(cfg):
"""
Args:
cfg (CfgNode): a detectron2 config
Returns:
CfgNode: an updated config with new options that will be used
by :class:`Caffe2Tracer`.
"""
is_frozen = cfg.is_frozen()
cfg.defrost()
cfg.EXPORT_CAFFE2 = CN()
cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT = False
if is_frozen:
cfg.freeze()
return cfg
class Caffe2Tracer:
"""
Make a detectron2 model traceable with caffe2 style.
An original detectron2 model may not be traceable, or
cannot be deployed directly after being traced, due to some reasons:
1. control flow in some ops
2. custom ops
3. complicated pre/post processing
This class provides a traceable version of a detectron2 model by:
1. Rewrite parts of the model using ops in caffe2
2. Define the inputs "after pre-processing" as inputs to the model
3. Remove post-processing and produce raw layer outputs
More specifically about inputs: all builtin models take two input tensors.
(1) NCHW float "data" which is an image (usually in [0, 255])
(2) Nx3 float "im_info", each row of which is (height, width, 1.0)
After making a traceable model, the class provide methods to export such a
model to different deployment formats.
The class currently only supports models using builtin meta architectures.
Experimental. Don't use.
"""
def __init__(self, cfg, model, inputs):
"""
Args:
cfg (CfgNode): a detectron2 config, with extra export-related options
added by :func:`add_export_config`.
model (nn.Module): a model built by
:func:`detectron2.modeling.build_model`.
inputs: sample inputs that the given model takes for inference.
Will be used to trace the model.
"""
assert isinstance(cfg, CN), cfg
assert isinstance(model, torch.nn.Module), type(model)
if "EXPORT_CAFFE2" not in cfg:
            cfg = add_export_config(cfg)  # will just use the defaults
self.cfg = cfg
self.model = model
self.inputs = inputs
def _get_traceable(self):
# TODO how to make it extensible to support custom models
C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[self.cfg.MODEL.META_ARCHITECTURE]
traceable_model = C2MetaArch(self.cfg, copy.deepcopy(self.model))
traceable_inputs = traceable_model.get_caffe2_inputs(self.inputs)
return traceable_model, traceable_inputs
def export_caffe2(self):
"""
Export the model to Caffe2's protobuf format.
The returned object can be saved with `.save_protobuf()` method.
The result can be loaded and executed using Caffe2 runtime.
Returns:
Caffe2Model
"""
model, inputs = self._get_traceable()
predict_net, init_net = export_caffe2_detection_model(model, inputs)
return Caffe2Model(predict_net, init_net)
def export_onnx(self):
"""
Export the model to ONNX format.
Note that the exported model contains custom ops only available in caffe2, therefore it
cannot be directly executed by other runtime. Post-processing or transformation passes
may be applied on the model to accommodate different runtimes.
Returns:
onnx.ModelProto: an onnx model.
"""
model, inputs = self._get_traceable()
return export_onnx_model_impl(model, (inputs,))
def export_torchscript(self):
"""
Export the model to a `torch.jit.TracedModule` by tracing.
The returned object can be saved to a file by ".save()".
Returns:
torch.jit.TracedModule: a torch TracedModule
"""
model, inputs = self._get_traceable()
logger = logging.getLogger(__name__)
logger.info("Tracing the model with torch.jit.trace ...")
with torch.no_grad():
return torch.jit.trace(model, (inputs,))
def export_caffe2_model(cfg, model, inputs):
"""
Export a detectron2 model to caffe2 format.
Args:
cfg (CfgNode): a detectron2 config, with extra export-related options
added by :func:`add_export_config`.
model (nn.Module): a model built by
:func:`detectron2.modeling.build_model`.
It will be modified by this function.
inputs: sample inputs that the given model takes for inference.
Will be used to trace the model.
Returns:
Caffe2Model
"""
return Caffe2Tracer(cfg, model, inputs).export_caffe2()
def export_onnx_model(cfg, model, inputs):
"""
Export a detectron2 model to ONNX format.
Note that the exported model contains custom ops only available in caffe2, therefore it
cannot be directly executed by other runtime. Post-processing or transformation passes
may be applied on the model to accommodate different runtimes.
Args:
cfg (CfgNode): a detectron2 config, with extra export-related options
added by :func:`add_export_config`.
model (nn.Module): a model built by
:func:`detectron2.modeling.build_model`.
It will be modified by this function.
inputs: sample inputs that the given model takes for inference.
Will be used to trace the model.
Returns:
onnx.ModelProto: an onnx model.
"""
return Caffe2Tracer(cfg, model, inputs).export_onnx()
class Caffe2Model(nn.Module):
"""
A wrapper around the traced model in caffe2's pb format.
"""
def __init__(self, predict_net, init_net):
super().__init__()
self.eval() # always in eval mode
self._predict_net = predict_net
self._init_net = init_net
self._predictor = None
@property
def predict_net(self):
"""
Returns:
core.Net: the underlying caffe2 predict net
"""
return self._predict_net
@property
def init_net(self):
"""
Returns:
core.Net: the underlying caffe2 init net
"""
return self._init_net
__init__.__HIDE_SPHINX_DOC__ = True
def save_protobuf(self, output_dir):
"""
Save the model as caffe2's protobuf format.
Args:
output_dir (str): the output directory to save protobuf files.
"""
logger = logging.getLogger(__name__)
logger.info("Saving model to {} ...".format(output_dir))
os.makedirs(output_dir, exist_ok=True)
with open(os.path.join(output_dir, "model.pb"), "wb") as f:
f.write(self._predict_net.SerializeToString())
with open(os.path.join(output_dir, "model.pbtxt"), "w") as f:
f.write(str(self._predict_net))
with open(os.path.join(output_dir, "model_init.pb"), "wb") as f:
f.write(self._init_net.SerializeToString())
def save_graph(self, output_file, inputs=None):
"""
Save the graph as SVG format.
Args:
output_file (str): a SVG file
inputs: optional inputs given to the model.
If given, the inputs will be used to run the graph to record
shape of every tensor. The shape information will be
saved together with the graph.
"""
if inputs is None:
save_graph(self._predict_net, output_file, op_only=False)
else:
size_divisibility = get_pb_arg_vali(self._predict_net, "size_divisibility", 0)
device = get_pb_arg_vals(self._predict_net, "device", b"cpu").decode("ascii")
inputs = convert_batched_inputs_to_c2_format(inputs, size_divisibility, device)
inputs = [x.cpu().numpy() for x in inputs]
run_and_save_graph(self._predict_net, self._init_net, inputs, output_file)
@staticmethod
def load_protobuf(dir):
"""
Args:
dir (str): a directory used to save Caffe2Model with
:meth:`save_protobuf`.
The files "model.pb" and "model_init.pb" are needed.
Returns:
Caffe2Model: the caffe2 model loaded from this directory.
"""
predict_net = caffe2_pb2.NetDef()
with open(os.path.join(dir, "model.pb"), "rb") as f:
predict_net.ParseFromString(f.read())
init_net = caffe2_pb2.NetDef()
with open(os.path.join(dir, "model_init.pb"), "rb") as f:
init_net.ParseFromString(f.read())
return Caffe2Model(predict_net, init_net)
def __call__(self, inputs):
"""
An interface that wraps around a caffe2 model and mimics detectron2's models'
input & output format. This is used to compare the outputs of caffe2 model
with its original torch model.
Due to the extra conversion between torch/caffe2,
this method is not meant for benchmark.
"""
if self._predictor is None:
self._predictor = ProtobufDetectionModel(self._predict_net, self._init_net)
return self._predictor(inputs)
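# Typical (sketched) export flow using the classes above; `cfg`, `model` and
# `inputs` are assumed to come from detectron2's get_cfg()/build_model() and a
# sample data loader batch:
#
#   cfg = add_export_config(cfg)
#   tracer = Caffe2Tracer(cfg, model, inputs)
#   c2_model = tracer.export_caffe2()          # Caffe2Model
#   c2_model.save_protobuf('./caffe2_out')     # writes model.pb / model_init.pb
#   reloaded = Caffe2Model.load_protobuf('./caffe2_out')
#   outputs = reloaded(inputs)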
|
import sys,os
sys.path.append(os.path.dirname(__file__) + os.sep + '../')
from FINDER import FINDER
import numpy as np
from tqdm import tqdm
import time
import networkx as nx
import pandas as pd
import pickle as cp
import random
def mkdir(path):
if not os.path.exists(path):
os.mkdir(path)
g_type = "barabasi_albert"
def GetSolution(STEPRATIO, MODEL_FILE):
######################################################################################################################
##................................................Get Solution (model).....................................................
dqn = FINDER()
## data_test
data_test_path = '../../data/real/cost/'
#data_test_name = ['Crime', 'HI-II-14']
#data_test_costType = ['degree', 'random']
#data_test_name = ['HI-II-14', 'Digg']
data_test_name = ['modified-morPOP-NL-day20.txt']
data_test_costType = ['degree']
#data_test_costType = ['degree']
#model_file = './FINDER_ND_cost/models/%s'%MODEL_FILE
model_file = './models/{}'.format(MODEL_FILE)
## save_dir
save_dir = '../results/my_FINDER_CN_cost_tf/real'
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
save_dir_degree = save_dir + '/Data_degree'
save_dir_random = save_dir + '/Data_random'
mkdir(save_dir_degree)
mkdir(save_dir_random)
## begin computing...
print('The best model is :%s' % (model_file))
dqn.LoadModel(model_file)
for costType in data_test_costType:
df = pd.DataFrame(np.arange(1 * len(data_test_name)).reshape((1, len(data_test_name))), index=['time'],
columns=data_test_name)
#################################### modify to choose which stepRatio to get the solution
stepRatio = STEPRATIO
for j in range(len(data_test_name)):
print('Testing dataset %s' % data_test_name[j])
data_test = data_test_path + data_test_name[j] + '_' + costType + '.gml'
if costType == 'degree':
solution, time = dqn.EvaluateRealData(model_file, data_test, save_dir_degree, stepRatio)
elif costType == 'random':
solution, time = dqn.EvaluateRealData(model_file, data_test, save_dir_random, stepRatio)
df.iloc[0, j] = time
if costType == 'degree':
save_dir_local = save_dir_degree + '/StepRatio_%.4f' % stepRatio
elif costType == 'random':
save_dir_local = save_dir_random + '/StepRatio_%.4f' % stepRatio
if not os.path.exists(save_dir_local):
mkdir(save_dir_local)
df.to_csv(save_dir_local + '/solution_%s_time.csv' % costType, encoding='utf-8', index=False)
print('model has been tested!')
def EvaluateSolution(STEPRATIO, STRATEGYID):
#######################################################################################################################
##................................................Evaluate Solution.....................................................
dqn = FINDER()
## data_test
data_test_path = '../../data/real/cost/'
#data_test_name = ['Crime', 'HI-II-14']
#data_test_costType = ['degree', 'random']
#data_test_name = ['HI-II-14', 'Digg']
data_test_name = ['modified-morPOP-NL-day20.txt']
data_test_costType = ['degree']
#data_test_costType = ['degree']
## save_dir
save_dir_degree = '../results/my_FINDER_CN_cost_tf/real/Data_degree/StepRatio_%.4f/' % STEPRATIO
save_dir_random = '../results/my_FINDER_CN_cost_tf/real/Data_random/StepRatio_%.4f/' % STEPRATIO
## begin computing...
for costType in data_test_costType:
df = pd.DataFrame(np.arange(2 * len(data_test_name)).reshape((2, len(data_test_name))),
index=['solution', 'time'], columns=data_test_name)
for i in range(len(data_test_name)):
print('Evaluating dataset %s' % data_test_name[i])
data_test = data_test_path + data_test_name[i] + '_' + costType + '.gml'
if costType == 'degree':
solution = save_dir_degree + data_test_name[i] + '_degree.txt'
elif costType == 'random':
solution = save_dir_random + data_test_name[i] + '_random.txt'
t1 = time.time()
# strategyID: 0:no insert; 1:count; 2:rank; 3:multiply
################################## modify to choose which strategy to evaluate
            strategyID = STRATEGYID
score, MaxCCList, solution = dqn.EvaluateSol(data_test, solution, strategyID, reInsertStep=20)
t2 = time.time()
df.iloc[0, i] = score
df.iloc[1, i] = t2 - t1
if costType == 'degree':
result_file = save_dir_degree + '/MaxCCList__Strategy_' + data_test_name[i] + '.txt'
elif costType == 'random':
result_file = save_dir_random + '/MaxCCList_Strategy_' + data_test_name[i] + '.txt'
with open(result_file, 'w') as f_out:
for j in range(len(MaxCCList)):
f_out.write('%.8f\n' % MaxCCList[j])
print('Data:%s, score:%.6f!' % (data_test_name[i], score))
if costType == 'degree':
df.to_csv(save_dir_degree + '/solution_%s_score.csv' % (costType), encoding='utf-8', index=False)
elif costType == 'random':
df.to_csv(save_dir_random + '/solution_%s_score.csv' % (costType), encoding='utf-8', index=False)
def main():
model_file = 'Model_{}/nrange_30_50_iter_400000.ckpt'.format(g_type)
#model_file = 'nrange_30_50_iter_122100.ckpt'
GetSolution(0.01, model_file)
EvaluateSolution(0.01, 0)
if __name__=="__main__":
main()
|
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import annotations
import collections
from collections import abc
import datetime
from io import StringIO
import itertools
import mmap
from textwrap import dedent
from typing import (
IO,
TYPE_CHECKING,
Any,
AnyStr,
Dict,
FrozenSet,
Hashable,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
cast,
overload,
)
import warnings
import numpy as np
import numpy.ma as ma
from pandas._config import get_option
from pandas._libs import algos as libalgos, lib, properties
from pandas._libs.lib import no_default
from pandas._typing import (
AggFuncType,
ArrayLike,
Axes,
Axis,
CompressionOptions,
Dtype,
FilePathOrBuffer,
FrameOrSeriesUnion,
IndexKeyFunc,
Label,
Level,
Renamer,
StorageOptions,
ValueKeyFunc,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
deprecate_kwarg,
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_axis_style_args,
validate_bool_kwarg,
validate_percentile,
)
from pandas.core.dtypes.cast import (
cast_scalar_to_array,
coerce_to_dtypes,
construct_1d_arraylike_from_scalar,
find_common_type,
infer_dtype_from_scalar,
invalidate_string_dtypes,
maybe_box_datetimelike,
maybe_cast_to_datetime,
maybe_casted_values,
maybe_convert_platform,
maybe_downcast_to_dtype,
maybe_infer_to_datetimelike,
maybe_upcast,
validate_numeric_casting,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
infer_dtype_from_object,
is_bool_dtype,
is_dataclass,
is_datetime64_any_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_hashable,
is_integer,
is_integer_dtype,
is_iterator,
is_list_like,
is_named_tuple,
is_object_dtype,
is_scalar,
is_sequence,
pandas_dtype,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import algorithms, common as com, generic, nanops, ops
from pandas.core.accessor import CachedAccessor
from pandas.core.aggregation import (
aggregate,
reconstruct_func,
relabel_result,
transform,
)
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import Categorical, ExtensionArray
from pandas.core.arrays.sparse import SparseFrameAccessor
from pandas.core.construction import extract_array
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import (
DatetimeIndex,
Index,
PeriodIndex,
ensure_index,
ensure_index_from_sequences,
)
from pandas.core.indexes.multi import MultiIndex, maybe_droplevels
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.core.internals import BlockManager
from pandas.core.internals.construction import (
arrays_to_mgr,
dataclasses_to_dicts,
get_names_from_index,
init_dict,
init_ndarray,
masked_rec_array_to_mgr,
reorder_arrays,
sanitize_index,
to_arrays,
)
from pandas.core.reshape.melt import melt
from pandas.core.series import Series
from pandas.core.sorting import get_group_index, lexsort_indexer, nargsort
from pandas.io.common import get_handle
from pandas.io.formats import console, format as fmt
from pandas.io.formats.info import BaseInfo, DataFrameInfo
import pandas.plotting
if TYPE_CHECKING:
from typing import Literal
from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.io.formats.style import Styler
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = {
"axes": "index, columns",
"klass": "DataFrame",
"axes_single_arg": "{0 or 'index', 1 or 'columns'}",
"axis": """axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index': apply function to each column.
If 1 or 'columns': apply function to each row.""",
"optional_by": """
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels.
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels.""",
"optional_labels": """labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
"optional_axis": """axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
}
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
When performing a cross merge, no column specifications to merge on are
allowed.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
* cross: creates the cartesian product from both frames, preserves the order
of the left keys.
.. versionadded:: 1.2.0
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
suffixes : list-like, default is ("_x", "_y")
A length-2 sequence where each element is optionally a string
indicating the suffix to add to overlapping column names in
`left` and `right` respectively. Pass a value of `None` instead
of a string to indicate that the column name from `left` or
`right` should be left as-is, with no suffix. At least one of the
values must not be None.
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to the output DataFrame called "_merge" with
information on the source of each row. The column can be given a different
name by providing a string argument. The column will have a Categorical
type with the value of "left_only" for observations whose merge key only
appears in the left DataFrame, "right_only" for observations
whose merge key only appears in the right DataFrame, and "both"
if the observation's merge key is found in both DataFrames.
validate : str, optional
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
See Also
--------
merge_ordered : Merge with optional filling/interpolation.
merge_asof : Merge on nearest keys.
DataFrame.join : Similar method using indices.
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Support for merging named Series objects was added in version 0.24.0
Examples
--------
>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]})
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> df1.merge(df2, left_on='lkey', right_on='rkey')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2 with specified left and right suffixes
appended to any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey',
... suffixes=('_left', '_right'))
lkey value_left rkey value_right
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
Traceback (most recent call last):
...
ValueError: columns overlap but no suffix specified:
Index(['value'], dtype='object')
>>> df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]})
>>> df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]})
>>> df1
a b
0 foo 1
1 bar 2
>>> df2
a c
0 foo 3
1 baz 4
>>> df1.merge(df2, how='inner', on='a')
a b c
0 foo 1 3
>>> df1.merge(df2, how='left', on='a')
a b c
0 foo 1 3.0
1 bar 2 NaN
>>> df1 = pd.DataFrame({'left': ['foo', 'bar']})
>>> df2 = pd.DataFrame({'right': [7, 8]})
>>> df1
left
0 foo
1 bar
>>> df2
right
0 7
1 8
>>> df1.merge(df2, how='cross')
left right
0 foo 7
1 foo 8
2 bar 7
3 bar 8
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame, OpsMixin):
"""
Two-dimensional, size-mutable, potentially heterogeneous tabular data.
Data structure also contains labeled axes (rows and columns).
Arithmetic operations align on both row and column labels. Can be
thought of as a dict-like container for Series objects. The primary
pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, dataclass or list-like objects. If
data is a dict, column order follows insertion-order.
.. versionchanged:: 0.25.0
If data is a list of dicts, column order follows insertion-order.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided.
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided.
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer.
copy : bool, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input.
See Also
--------
DataFrame.from_records : Constructor from tuples, also record arrays.
DataFrame.from_dict : From dicts of Series, arrays, or dicts.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_table : Read general delimited file into DataFrame.
read_clipboard : Read text from clipboard into DataFrame.
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
... columns=['a', 'b', 'c'])
>>> df2
a b c
0 1 2 3
1 4 5 6
2 7 8 9
Constructing DataFrame from dataclass:
>>> from dataclasses import make_dataclass
>>> Point = make_dataclass("Point", [("x", int), ("y", int)])
>>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)])
x y
0 0 0
1 0 3
2 2 3
"""
_internal_names_set = {"columns", "index"} | NDFrame._internal_names_set
_typ = "dataframe"
_HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray)
@property
def _constructor(self) -> Type[DataFrame]:
return DataFrame
_constructor_sliced: Type[Series] = Series
_hidden_attrs: FrozenSet[str] = NDFrame._hidden_attrs | frozenset([])
_accessors: Set[str] = {"sparse"}
@property
def _constructor_expanddim(self):
# GH#31549 raising NotImplementedError on a property causes trouble
# for `inspect`
def constructor(*args, **kwargs):
raise NotImplementedError("Not supported for DataFrames!")
return constructor
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data=None,
index: Optional[Axes] = None,
columns: Optional[Axes] = None,
dtype: Optional[Dtype] = None,
copy: bool = False,
):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._mgr
if isinstance(data, BlockManager):
if index is None and columns is None and dtype is None and copy is False:
# GH#33357 fastpath
NDFrame.__init__(self, data)
return
mgr = self._init_mgr(
data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy
)
elif isinstance(data, dict):
mgr = init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data.soften_mask() # set hardmask False if it was True
data[mask] = fill_value
else:
data = data.copy()
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = {k: data[k] for k in data_columns}
if columns is None:
columns = data_columns
mgr = init_dict(data, index, columns, dtype=dtype)
elif getattr(data, "name", None) is not None:
mgr = init_dict({data.name: data}, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
# For data is list-like, or Iterable (will consume into list)
elif isinstance(data, abc.Iterable) and not isinstance(data, (str, bytes)):
if not isinstance(data, (abc.Sequence, ExtensionArray)):
data = list(data)
if len(data) > 0:
if is_dataclass(data[0]):
data = dataclasses_to_dicts(data)
if is_list_like(data[0]) and getattr(data[0], "ndim", 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = to_arrays(data, columns, dtype=dtype)
columns = ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = ibase.default_index(len(data[0]))
else:
index = ibase.default_index(len(data))
mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
else:
mgr = init_dict({}, index, columns, dtype=dtype)
# For data is scalar
else:
if index is None or columns is None:
raise ValueError("DataFrame constructor not properly called!")
if not dtype:
dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True)
# For data is a scalar extension dtype
if is_extension_array_dtype(dtype):
values = [
construct_1d_arraylike_from_scalar(data, len(index), dtype)
for _ in range(len(columns))
]
mgr = arrays_to_mgr(values, columns, index, columns, dtype=None)
else:
# Attempt to coerce to a numpy array
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as err:
exc = TypeError(
"DataFrame constructor called with "
f"incompatible data and dtype: {err}"
)
raise exc from err
if arr.ndim != 0:
raise ValueError("DataFrame constructor not properly called!")
values = cast_scalar_to_array(
(len(index), len(columns)), data, dtype=dtype
)
mgr = init_ndarray(
values, index, columns, dtype=values.dtype, copy=False
)
NDFrame.__init__(self, mgr)
# ----------------------------------------------------------------------
@property
def axes(self) -> List[Index]:
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
[RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
dtype='object')]
"""
return [self.index, self.columns]
@property
def shape(self) -> Tuple[int, int]:
"""
Return a tuple representing the dimensionality of the DataFrame.
See Also
--------
ndarray.shape : Tuple of array dimensions.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self.index), len(self.columns)
@property
def _is_homogeneous_type(self) -> bool:
"""
Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
See Also
--------
Index._is_homogeneous_type : Whether the object has a single
dtype.
MultiIndex._is_homogeneous_type : Whether all the levels of a
MultiIndex have the same dtype.
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False
"""
if self._mgr.any_extension_types:
return len({block.dtype for block in self._mgr.blocks}) == 1
else:
return not self._is_mixed_type
@property
def _can_fast_transpose(self) -> bool:
"""
Can we transpose this DataFrame without creating any new array objects.
"""
if self._mgr.any_extension_types:
# TODO(EA2D) special case would be unnecessary with 2D EAs
return False
return len(self._mgr.blocks) == 1
# ----------------------------------------------------------------------
# Rendering Methods
def _repr_fits_vertical_(self) -> bool:
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns.
        In a non-interactive session no boundaries apply.
`ignore_width` is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if (max_columns and nb_columns > max_columns) or (
(not ignore_width) and width and nb_columns > (width // 2)
):
return False
        # _repr_html_ under an IPython notebook, and non-interactive scripts,
        # both ignore terminal dims
if ignore_width or not console.in_interactive_session():
return True
if get_option("display.width") is not None or console.in_ipython_frontend():
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
        if max_rows is not None:
            # min of two, where one may be None
            d = d.iloc[: min(max_rows, len(d))]
        else:
            # max_rows is None means unlimited rows
            return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max(len(line) for line in value.split("\n"))
return repr_width < width
def _info_repr(self) -> bool:
"""
True if the repr should show the info view.
"""
info_repr_option = get_option("display.large_repr") == "info"
return info_repr_option and not (
self._repr_fits_horizontal_() and self._repr_fits_vertical_()
)
def __repr__(self) -> str:
"""
Return a string representation for a particular DataFrame.
"""
buf = StringIO("")
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
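        # Otherwise build a (possibly truncated) string repr honouring the
        # current ``display.*`` options.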
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
max_colwidth = get_option("display.max_colwidth")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(
buf=buf,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
line_width=width,
max_colwidth=max_colwidth,
show_dimensions=show_dimensions,
)
return buf.getvalue()
def _repr_html_(self) -> Optional[str]:
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
if self._info_repr():
buf = StringIO("")
self.info(buf=buf)
# need to escape the <class>, should be the first line.
            val = buf.getvalue().replace("<", r"&lt;", 1)
            val = val.replace(">", r"&gt;", 1)
return "<pre>" + val + "</pre>"
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
formatter = fmt.DataFrameFormatter(
self,
columns=None,
col_space=None,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
justify=None,
index_names=True,
header=True,
index=True,
bold_rows=True,
escape=True,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=".",
)
return fmt.DataFrameRenderer(formatter).to_html(notebook=True)
else:
return None
@Substitution(
header_type="bool or sequence",
header="Write out the column names. If a list of strings "
"is given, it is assumed to be aliases for the "
"column names",
col_space_type="int, list or dict of int",
col_space="The minimum width of each column",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_string(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
columns: Optional[Sequence[str]] = None,
col_space: Optional[int] = None,
header: Union[bool, Sequence[str]] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[fmt.FormattersType] = None,
float_format: Optional[fmt.FloatFormatType] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
justify: Optional[str] = None,
max_rows: Optional[int] = None,
min_rows: Optional[int] = None,
max_cols: Optional[int] = None,
show_dimensions: bool = False,
decimal: str = ".",
line_width: Optional[int] = None,
max_colwidth: Optional[int] = None,
encoding: Optional[str] = None,
) -> Optional[str]:
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
max_colwidth : int, optional
Max width to truncate each column in characters. By default, no limit.
.. versionadded:: 1.0.0
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
"""
from pandas import option_context
with option_context("display.max_colwidth", max_colwidth):
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header,
index=index,
min_rows=min_rows,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
)
return fmt.DataFrameRenderer(formatter).to_string(
buf=buf,
encoding=encoding,
line_width=line_width,
)
# ----------------------------------------------------------------------
@property
def style(self) -> Styler:
"""
Returns a Styler object.
Contains methods for building a styled HTML representation of the DataFrame.
See Also
--------
io.formats.style.Styler : Helps style a DataFrame or Series according to the
data with HTML and CSS.
"""
from pandas.io.formats.style import Styler
return Styler(self)
_shared_docs[
"items"
] = r"""
Iterate over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.items():
... print(f'label: {label}')
... print(f'content: {content}', sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
@Appender(_shared_docs["items"])
def items(self) -> Iterable[Tuple[Label, Series]]:
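        # With unique column labels the per-column Series can be served from
        # (and cached in) the item cache; otherwise fall back to positional
        # access, which is safe for duplicate labels.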
if self.columns.is_unique and hasattr(self, "_item_cache"):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
@Appender(_shared_docs["items"])
def iteritems(self) -> Iterable[Tuple[Label, Series]]:
yield from self.items()
def iterrows(self) -> Iterable[Tuple[Label, Series]]:
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
See Also
--------
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index: bool = True, name: Optional[str] = "Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Returns
-------
iterator
An object to iterate over namedtuples for each row in the
DataFrame with the first field possibly being the index and
following fields being the column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
On python versions < 3.7 regular tuples are returned for DataFrames
with a large number of columns (>254).
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = list(self.columns)
if index:
arrays.append(self.index)
fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
if name is not None:
# https://github.com/python/mypy/issues/9046
# error: namedtuple() expects a string literal as the first argument
itertuple = collections.namedtuple( # type: ignore[misc]
name, fields, rename=True
)
return map(itertuple._make, zip(*arrays))
# fallback to regular tuples
return zip(*arrays)
def __len__(self) -> int:
"""
        Return the length of the info axis, which for a DataFrame is the index.
"""
return len(self.index)
def dot(self, other):
"""
Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
values of an other Series, DataFrame or a numpy array.
It can also be called using ``self @ other`` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the matrix product with.
Returns
-------
Series or DataFrame
If other is a Series, return the matrix product between self and
            other as a Series. If other is a DataFrame or a numpy array, return
            the matrix product of self and other as a DataFrame.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication. In addition, the column names of
DataFrame and the index of other must contain the same values, as they
will be aligned prior to the multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
Examples
--------
Here we multiply a DataFrame with a Series.
>>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
>>> s = pd.Series([1, 1, 2, 1])
>>> df.dot(s)
0 -4
1 5
dtype: int64
Here we multiply a DataFrame with another DataFrame.
>>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(other)
0 1
0 1 4
1 2 2
        Note that the dot method gives the same result as @
>>> df @ other
0 1
0 1 4
1 2 2
        The dot method also works if other is an np.array.
>>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(arr)
0 1
0 1 4
1 2 2
Note how shuffling of the objects does not change the result.
>>> s2 = s.reindex([1, 0, 2, 3])
>>> df.dot(s2)
0 -4
1 5
dtype: int64
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if len(common) > len(self.columns) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right._values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError(
f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
)
if isinstance(other, DataFrame):
return self._constructor(
np.dot(lvals, rvals), index=left.index, columns=other.columns
)
elif isinstance(other, Series):
return self._constructor_sliced(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return self._constructor_sliced(result, index=left.index)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
def __matmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def __rmatmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
try:
return self.T.dot(np.transpose(other)).T
except ValueError as err:
if "shape mismatch" not in str(err):
raise
# GH#21581 give exception message for original shapes
msg = f"shapes {np.shape(other)} and {self.shape} not aligned"
raise ValueError(msg) from err
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient="columns", dtype=None, columns=None) -> DataFrame:
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from structured ndarray, sequence
of tuples or dicts, or DataFrame.
DataFrame : DataFrame object creation using constructor.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
"""
index = None
orient = orient.lower()
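        # For orient='index' the dict keys become the row labels; nested dict or
        # Series values are pivoted so that their keys become the columns.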
if orient == "index":
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == "columns":
if columns is not None:
raise ValueError("cannot use columns parameter with orient='columns'")
else: # pragma: no cover
raise ValueError("only recognize index or columns for orient")
return cls(data, index=index, columns=columns, dtype=dtype)
def to_numpy(
self, dtype=None, copy: bool = False, na_value=lib.no_default
) -> np.ndarray:
"""
Convert the DataFrame to a NumPy array.
.. versionadded:: 0.24.0
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
        ``float16`` and ``float32``, the resulting dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the dtypes of the DataFrame columns.
.. versionadded:: 1.1.0
Returns
-------
numpy.ndarray
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df['C'] = pd.date_range('2000', periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
self._consolidate_inplace()
result = self._mgr.as_array(
transpose=self._AXIS_REVERSED, dtype=dtype, copy=copy, na_value=na_value
)
if result.dtype is not dtype:
result = np.array(result, dtype=dtype, copy=False)
return result
def to_dict(self, orient="dict", into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
See Also
--------
DataFrame.from_dict: Create a DataFrame from a dictionary.
DataFrame.to_json: Convert a DataFrame to JSON format.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df.to_dict()
{'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
'col2': row1 0.50
row2 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
>>> df.to_dict('index')
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
"""
if not self.columns.is_unique:
warnings.warn(
"DataFrame columns are not unique, some columns will be omitted.",
UserWarning,
stacklevel=2,
)
# GH16122
into_c = com.standardize_mapping(into)
orient = orient.lower()
# GH32515
if orient.startswith(("d", "l", "s", "r", "i")) and orient not in {
"dict",
"list",
"series",
"split",
"records",
"index",
}:
warnings.warn(
"Using short name for 'orient' is deprecated. Only the "
"options: ('dict', list, 'series', 'split', 'records', 'index') "
"will be used in a future version. Use one of the above "
"to silence this warning.",
FutureWarning,
)
if orient.startswith("d"):
orient = "dict"
elif orient.startswith("l"):
orient = "list"
elif orient.startswith("sp"):
orient = "split"
elif orient.startswith("s"):
orient = "series"
elif orient.startswith("r"):
orient = "records"
elif orient.startswith("i"):
orient = "index"
if orient == "dict":
return into_c((k, v.to_dict(into)) for k, v in self.items())
elif orient == "list":
return into_c((k, v.tolist()) for k, v in self.items())
elif orient == "split":
return into_c(
(
("index", self.index.tolist()),
("columns", self.columns.tolist()),
(
"data",
[
list(map(maybe_box_datetimelike, t))
for t in self.itertuples(index=False, name=None)
],
),
)
)
elif orient == "series":
return into_c((k, maybe_box_datetimelike(v)) for k, v in self.items())
elif orient == "records":
columns = self.columns.tolist()
rows = (
dict(zip(columns, row))
for row in self.itertuples(index=False, name=None)
)
return [
into_c((k, maybe_box_datetimelike(v)) for k, v in row.items())
for row in rows
]
elif orient == "index":
if not self.index.is_unique:
raise ValueError("DataFrame index must be unique for orient='index'.")
return into_c(
(t[0], dict(zip(self.columns, t[1:])))
for t in self.itertuples(name=None)
)
else:
raise ValueError(f"orient '{orient}' not understood")
def to_gbq(
self,
destination_table,
project_id=None,
chunksize=None,
reauth=False,
if_exists="fail",
auth_local_webserver=False,
table_schema=None,
location=None,
progress_bar=True,
credentials=None,
) -> None:
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists raise pandas_gbq.gbq.TableCreationError.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
            List of BigQuery table fields to which the corresponding DataFrame
            columns conform, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery.
"""
from pandas.io import gbq
gbq.to_gbq(
self,
destination_table,
project_id=project_id,
chunksize=chunksize,
reauth=reauth,
if_exists=if_exists,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema,
location=location,
progress_bar=progress_bar,
credentials=credentials,
)
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
) -> DataFrame:
"""
Convert structured or record ndarray to DataFrame.
Creates a DataFrame object from a structured ndarray, sequence of
tuples or dicts, or DataFrame.
Parameters
----------
data : structured ndarray, sequence of tuples or dicts, or DataFrame
Structured input data.
index : str, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use.
exclude : sequence, default None
Columns or fields to exclude.
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns).
coerce_float : bool, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
nrows : int, default None
Number of rows to read if data is an iterator.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_dict : DataFrame from dict of array-like or dicts.
DataFrame : DataFrame object creation using constructor.
Examples
--------
Data can be provided as a structured ndarray:
>>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')],
... dtype=[('col_1', 'i4'), ('col_2', 'U1')])
>>> pd.DataFrame.from_records(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Data can be provided as a list of dicts:
>>> data = [{'col_1': 3, 'col_2': 'a'},
... {'col_1': 2, 'col_2': 'b'},
... {'col_1': 1, 'col_2': 'c'},
... {'col_1': 0, 'col_2': 'd'}]
>>> pd.DataFrame.from_records(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Data can be provided as a list of tuples with corresponding columns:
>>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')]
>>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2'])
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
if is_iterator(data):
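            # Materialize at most ``nrows`` records up front so the first row
            # can be inspected for a structured dtype.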
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, "dtype") and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns_list = []
for k, v in data.items():
if k in columns:
arr_columns_list.append(k)
arrays.append(v)
arrays, arr_columns = reorder_arrays(arrays, arr_columns_list, columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = to_arrays(data, columns)
if columns is not None:
columns = ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = to_arrays(data, columns, coerce_float=coerce_float)
arr_columns = ensure_index(arr_columns)
if columns is not None:
columns = ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if isinstance(index, str) or not hasattr(index, "__iter__"):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
index_data = [arrays[arr_columns.get_loc(field)] for field in index]
except (KeyError, TypeError):
# raised by get_loc, see GH#29258
result_index = index
else:
result_index = ensure_index_from_sequences(index_data, names=index)
exclude.update(index)
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
def to_records(
self, index=True, column_dtypes=None, index_dtypes=None
) -> np.recarray:
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
column_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
If the DataFrame index has no label then the recarray field name
is set to 'index'. If the index has a label then this is used as the
field name:
>>> df.index = df.index.rename("I")
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"})
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])
As well as for the index:
>>> df.to_records(index_dtypes="<S2")
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
>>> index_dtypes = f"<S{df.index.str.len().max()}"
>>> df.to_records(index_dtypes=index_dtypes)
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
"""
if index:
if isinstance(self.index, MultiIndex):
                # array of tuples to numpy columns; this necessarily copies the data
ix_vals = list(map(np.array, zip(*self.index._values)))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [
np.asarray(self.iloc[:, i]) for i in range(len(self.columns))
]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = f"level_{count}"
count += 1
elif index_names[0] is None:
index_names = ["index"]
names = [str(name) for name in itertools.chain(index_names, self.columns)]
else:
arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))]
names = [str(c) for c in self.columns]
index_names = []
index_len = len(index_names)
formats = []
for i, v in enumerate(arrays):
index = i
# When the names and arrays are collected, we
# first collect those in the DataFrame's index,
# followed by those in its columns.
#
# Thus, the total length of the array is:
# len(index_names) + len(DataFrame.columns).
#
# This check allows us to see whether we are
# handling a name / array in the index or column.
if index < index_len:
dtype_mapping = index_dtypes
name = index_names[index]
else:
index -= index_len
dtype_mapping = column_dtypes
name = self.columns[index]
# We have a dictionary, so we get the data type
# associated with the index or column (which can
# be denoted by its name in the DataFrame or its
# position in DataFrame's array of indices or
# columns, whichever is applicable.
if is_dict_like(dtype_mapping):
if name in dtype_mapping:
dtype_mapping = dtype_mapping[name]
elif index in dtype_mapping:
dtype_mapping = dtype_mapping[index]
else:
dtype_mapping = None
# If no mapping can be found, use the array's
# dtype attribute for formatting.
#
# A valid dtype must either be a type or
# string naming a type.
if dtype_mapping is None:
formats.append(v.dtype)
elif isinstance(dtype_mapping, (type, np.dtype, str)):
formats.append(dtype_mapping)
else:
element = "row" if i < index_len else "column"
msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}"
raise ValueError(msg)
return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})
@classmethod
def _from_arrays(
cls,
arrays,
columns,
index,
dtype: Optional[Dtype] = None,
verify_integrity: bool = True,
) -> DataFrame:
"""
Create DataFrame from a list of arrays corresponding to the columns.
Parameters
----------
arrays : list-like of arrays
Each array in the list corresponds to one column, in order.
columns : list-like, Index
The column names for the resulting DataFrame.
index : list-like, Index
            The row labels for the resulting DataFrame.
dtype : dtype, optional
Optional dtype to enforce for all arrays.
verify_integrity : bool, default True
Validate and homogenize all input. If set to False, it is assumed
            that all elements of `arrays` are actual arrays, stored as they will
            be in a block (numpy ndarray or ExtensionArray), that they have the
            same length as, and are aligned with, the index, and that `columns`
            and `index` are already Index objects.
Returns
-------
DataFrame
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
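        # ``columns`` is passed both as the array names and as the resulting
        # column labels for ``arrays_to_mgr``.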
mgr = arrays_to_mgr(
arrays,
columns,
index,
columns,
dtype=dtype,
verify_integrity=verify_integrity,
)
return cls(mgr)
@doc(storage_options=generic._shared_docs["storage_options"])
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_stata(
self,
path: FilePathOrBuffer,
convert_dates: Optional[Dict[Label, str]] = None,
write_index: bool = True,
byteorder: Optional[str] = None,
time_stamp: Optional[datetime.datetime] = None,
data_label: Optional[str] = None,
variable_labels: Optional[Dict[Label, str]] = None,
version: Optional[int] = 114,
convert_strl: Optional[Sequence[Label]] = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
) -> None:
"""
Export DataFrame object to Stata dta format.
Writes the DataFrame to a Stata dataset file.
"dta" files contain a Stata dataset.
Parameters
----------
path : str, buffer or path object
String, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
.. versionchanged:: 1.0.0
Previously this was "fname"
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str, optional
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
version : {{114, 117, 118, 119, None}}, default 114
Version to use in the output dta file. Set to None to let pandas
decide between 118 or 119 formats depending on the number of
columns in the frame. Version 114 can be read by Stata 10 and
later. Version 117 can be read by Stata 13 or later. Version 118
is supported in Stata 14 and later. Version 119 is supported in
Stata 15 and later. Version 114 limits string variables to 244
characters or fewer while versions 117 and later allow strings
with lengths up to 2,000,000 characters. Versions 118 and 119
support Unicode characters, and version 119 supports more than
32,767 variables.
Version 119 should usually only be used when the number of
variables exceeds the capacity of dta format 118. Exporting
smaller datasets in format 119 may have unintended consequences,
and, as of November 2020, Stata SE cannot read version 119 files.
.. versionchanged:: 1.0.0
Added support for formats 118 and 119.
convert_strl : list, optional
            List of column names to convert to the Stata StrL string format.
            Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
compression : str or dict, default 'infer'
For on-the-fly compression of the output dta. If string, specifies
compression mode. If dict, value at key 'method' specifies
compression mode. Compression mode must be one of {{'infer', 'gzip',
'bz2', 'zip', 'xz', None}}. If compression mode is 'infer' and
`fname` is path-like, then detect compression from the following
extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
compression). If dict and compression mode is one of {{'zip',
'gzip', 'bz2'}}, or inferred as one of the above, other entries
passed as additional compression options.
.. versionadded:: 1.1.0
{storage_options}
.. versionadded:: 1.2.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
or datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
See Also
--------
read_stata : Import Stata data files.
io.stata.StataWriter : Low-level writer for Stata data files.
io.stata.StataWriter117 : Low-level writer for version 117 files.
Examples
--------
>>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}})
>>> df.to_stata('animals.dta') # doctest: +SKIP
"""
if version not in (114, 117, 118, 119, None):
raise ValueError("Only formats 114, 117, 118 and 119 are supported.")
if version == 114:
if convert_strl is not None:
raise ValueError("strl is not supported in format 114")
from pandas.io.stata import StataWriter as statawriter
elif version == 117:
# mypy: Name 'statawriter' already defined (possibly by an import)
from pandas.io.stata import ( # type: ignore[no-redef]
StataWriter117 as statawriter,
)
else: # versions 118 and 119
# mypy: Name 'statawriter' already defined (possibly by an import)
from pandas.io.stata import ( # type: ignore[no-redef]
StataWriterUTF8 as statawriter,
)
kwargs: Dict[str, Any] = {}
if version is None or version >= 117:
# strl conversion is only supported >= 117
kwargs["convert_strl"] = convert_strl
if version is None or version >= 118:
# Specifying the version is only supported for UTF8 (118 or 119)
kwargs["version"] = version
# mypy: Too many arguments for "StataWriter"
writer = statawriter( # type: ignore[call-arg]
path,
self,
convert_dates=convert_dates,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
write_index=write_index,
variable_labels=variable_labels,
compression=compression,
storage_options=storage_options,
**kwargs,
)
writer.write_file()
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_feather(self, path: FilePathOrBuffer[AnyStr], **kwargs) -> None:
"""
Write a DataFrame to the binary Feather format.
Parameters
----------
path : str or file-like object
If a string, it will be used as Root Directory path.
**kwargs :
Additional keywords passed to :func:`pyarrow.feather.write_feather`.
Starting with pyarrow 0.17, this includes the `compression`,
`compression_level`, `chunksize` and `version` keywords.
.. versionadded:: 1.1.0
"""
from pandas.io.feather_format import to_feather
to_feather(self, path, **kwargs)
@doc(
Series.to_markdown,
klass=_shared_doc_kwargs["klass"],
storage_options=_shared_docs["storage_options"],
examples="""Examples
--------
>>> df = pd.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(df.to_markdown())
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
Output markdown with a tabulate option.
>>> print(df.to_markdown(tablefmt="grid"))
+----+------------+------------+
| | animal_1 | animal_2 |
+====+============+============+
| 0 | elk | dog |
+----+------------+------------+
| 1 | pig | quetzal |
+----+------------+------------+
""",
)
def to_markdown(
self,
buf: Optional[Union[IO[str], str]] = None,
mode: str = "wt",
index: bool = True,
storage_options: StorageOptions = None,
**kwargs,
) -> Optional[str]:
if "showindex" in kwargs:
warnings.warn(
"'showindex' is deprecated. Only 'index' will be used "
"in a future version. Use 'index' to silence this warning.",
FutureWarning,
stacklevel=2,
)
kwargs.setdefault("headers", "keys")
kwargs.setdefault("tablefmt", "pipe")
kwargs.setdefault("showindex", index)
tabulate = import_optional_dependency("tabulate")
result = tabulate.tabulate(self, **kwargs)
if buf is None:
return result
with get_handle(buf, mode, storage_options=storage_options) as handles:
assert not isinstance(handles.handle, (str, mmap.mmap))
handles.handle.writelines(result)
return None
@doc(storage_options=generic._shared_docs["storage_options"])
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_parquet(
self,
path: Optional[FilePathOrBuffer] = None,
engine: str = "auto",
compression: Optional[str] = "snappy",
index: Optional[bool] = None,
partition_cols: Optional[List[str]] = None,
storage_options: StorageOptions = None,
**kwargs,
) -> Optional[bytes]:
"""
Write a DataFrame to the binary parquet format.
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
path : str or file-like object, default None
If a string, it will be used as Root Directory path
when writing a partitioned dataset. By file-like object,
we refer to objects with a write() method, such as a file handle
(e.g. via builtin open function) or io.BytesIO. The engine
fastparquet does not accept file-like objects. If path is None,
a bytes object is returned.
.. versionchanged:: 1.2.0
Previously this was "fname"
engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file.
If ``None``, similar to ``True`` the dataframe's index(es)
will be saved. However, instead of being saved as values,
the RangeIndex will be stored as a range in the metadata so it
doesn't require much space and is faster. Other indexes will
be included as columns in the file output.
.. versionadded:: 0.24.0
partition_cols : list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
Must be None if path is not a string.
.. versionadded:: 0.24.0
{storage_options}
.. versionadded:: 1.2.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
Returns
-------
bytes if no path argument is provided else None
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}})
>>> df.to_parquet('df.parquet.gzip',
... compression='gzip') # doctest: +SKIP
>>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
col1 col2
0 1 3
1 2 4
If you want to get a buffer to the parquet content you can use a io.BytesIO
object, as long as you don't use partition_cols, which creates multiple files.
>>> import io
>>> f = io.BytesIO()
>>> df.to_parquet(f)
>>> f.seek(0)
0
>>> content = f.read()
"""
from pandas.io.parquet import to_parquet
return to_parquet(
self,
path,
engine,
compression=compression,
index=index,
partition_cols=partition_cols,
storage_options=storage_options,
**kwargs,
)
@Substitution(
header_type="bool",
header="Whether to print column labels, default True",
col_space_type="str or int, list or dict of int or str",
col_space="The minimum width of each column in CSS length "
"units. An int is assumed to be px units.\n\n"
" .. versionadded:: 0.25.0\n"
" Ability to use str",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_html(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
justify=None,
max_rows=None,
max_cols=None,
show_dimensions=False,
decimal=".",
bold_rows=True,
classes=None,
escape=True,
notebook=False,
border=None,
table_id=None,
render_links=False,
encoding=None,
):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.display.html.border``.
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links.
.. versionadded:: 0.24.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
"""
if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
header=header,
index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
justify=justify,
index_names=index_names,
escape=escape,
decimal=decimal,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
)
# TODO: a generic formatter wld b in DataFrameFormatter
return fmt.DataFrameRenderer(formatter).to_html(
buf=buf,
classes=classes,
notebook=notebook,
border=border,
encoding=encoding,
table_id=table_id,
render_links=render_links,
)
# ----------------------------------------------------------------------
@Substitution(
klass="DataFrame",
type_sub=" and columns",
max_cols_sub=dedent(
"""\
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used."""
),
show_counts_sub=dedent(
"""\
show_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the DataFrame is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
null_counts : bool, optional
.. deprecated:: 1.2.0
Use show_counts instead."""
),
examples_sub=dedent(
"""\
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information of all columns:
>>> df.info(verbose=True)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 248.0+ bytes
        Prints a summary of the column count and dtypes but not per-column
        information:
>>> df.info(verbose=False)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 248.0+ bytes
        Pipe the output of DataFrame.info to a buffer instead of sys.stdout, get
        the buffer content and write it to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w",
... encoding="utf-8") as f: # doctest: +SKIP
... f.write(s)
260
        The `memory_usage` parameter allows deep introspection mode, especially
        useful for big DataFrames and fine-tuning memory optimization:
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = pd.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
>>> df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 165.9 MB"""
),
see_also_sub=dedent(
"""\
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns."""
),
version_added_sub="",
)
@doc(BaseInfo.render)
def info(
self,
verbose: Optional[bool] = None,
buf: Optional[IO[str]] = None,
max_cols: Optional[int] = None,
memory_usage: Optional[Union[bool, str]] = None,
show_counts: Optional[bool] = None,
null_counts: Optional[bool] = None,
) -> None:
if null_counts is not None:
if show_counts is not None:
raise ValueError("null_counts used with show_counts. Use show_counts.")
warnings.warn(
"null_counts is deprecated. Use show_counts instead",
FutureWarning,
stacklevel=2,
)
show_counts = null_counts
info = DataFrameInfo(
data=self,
memory_usage=memory_usage,
)
info.render(
buf=buf,
max_cols=max_cols,
verbose=verbose,
show_counts=show_counts,
)
def memory_usage(self, index=True, deep=False) -> Series:
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
are the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 1.0+0.0j 1 True
1 1 1.0 1.0+0.0j 1 True
2 1 1.0 1.0+0.0j 1 True
3 1 1.0 1.0+0.0j 1 True
4 1 1.0 1.0+0.0j 1 True
>>> df.memory_usage()
Index 128
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default; pass ``deep=True`` to include it:
>>> df.memory_usage(deep=True)
Index 128
int64 40000
float64 40000
complex128 80000
object 180000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5244
"""
result = self._constructor_sliced(
[c.memory_usage(index=False, deep=deep) for col, c in self.items()],
index=self.columns,
)
if index:
result = self._constructor_sliced(
self.index.memory_usage(deep=deep), index=["Index"]
).append(result)
return result
def transpose(self, *args, copy: bool = False) -> DataFrame:
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
*args : tuple, optional
Accepted for compatibility with NumPy.
copy : bool, default False
Whether to copy the data after transposing, even for DataFrames
with a single dtype.
Note that a copy is always required for mixed dtype DataFrames,
or for DataFrames with any extension types.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8.0
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, {})
# construct the args
dtypes = list(self.dtypes)
if self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]):
# We have EAs with the same dtype. We can preserve that dtype in transpose.
dtype = dtypes[0]
arr_type = dtype.construct_array_type()
values = self.values
new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values]
result = self._constructor(
dict(zip(self.index, new_values)), index=self.columns
)
else:
new_values = self.values.T
if copy:
new_values = new_values.copy()
result = self._constructor(
new_values, index=self.columns, columns=self.index
)
return result.__finalize__(self, method="transpose")
@property
def T(self) -> DataFrame:
return self.transpose()
# ----------------------------------------------------------------------
# Indexing Methods
def _ixs(self, i: int, axis: int = 0):
"""
Parameters
----------
i : int
axis : int
Notes
-----
If slice passed, the resulting data will be a view.
"""
# irow
if axis == 0:
new_values = self._mgr.fast_xs(i)
# if we are a copy, mark as such
copy = isinstance(new_values, np.ndarray) and new_values.base is None
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype,
)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
label = self.columns[i]
values = self._mgr.iget(i)
result = self._box_col_values(values, i)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def _get_column_array(self, i: int) -> ArrayLike:
"""
Get the values of the i'th column (ndarray or ExtensionArray, as stored
in the Block)
"""
return self._mgr.iget_values(i)
def _iter_column_arrays(self) -> Iterator[ArrayLike]:
"""
Iterate over the arrays of all columns in order.
This returns the values as stored in the Block (ndarray or ExtensionArray).
"""
for i in range(len(self.columns)):
yield self._get_column_array(i)
def __getitem__(self, key):
key = lib.item_from_zerodim(key)
key = com.apply_if_callable(key, self)
if is_hashable(key):
# shortcut if the key is in columns
if self.columns.is_unique and key in self.columns:
if isinstance(self.columns, MultiIndex):
return self._getitem_multilevel(key)
return self._get_item_cache(key)
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
if isinstance(indexer, np.ndarray):
indexer = lib.maybe_indices_to_slice(
indexer.astype(np.intp, copy=False), len(self)
)
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
return self.where(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
return self._getitem_bool_array(key)
# We are left with two options: a single key, and a collection of keys,
# We interpret tuples as collections only for non-MultiIndex
is_single_key = isinstance(key, tuple) or not is_list_like(key)
if is_single_key:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
indexer = self.columns.get_loc(key)
if is_integer(indexer):
indexer = [indexer]
else:
if is_iterator(key):
key = list(key)
indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1]
# take() does not accept boolean indexers
if getattr(indexer, "dtype", None) == bool:
indexer = np.where(indexer)[0]
data = self._take_with_is_copy(indexer, axis=1)
if is_single_key:
# What does looking for a single key in a non-unique index return?
# The behavior is inconsistent. It returns a Series, except when
# - the key itself is repeated (test on data.shape, #9519), or
# - we have a MultiIndex on columns (test on self.columns, #21309)
if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):
# GH#26490 using data[key] can cause RecursionError
data = data._get_item_cache(key)
return data
def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn(
"Boolean Series key will be reindexed to match DataFrame index.",
UserWarning,
stacklevel=3,
)
elif len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}."
)
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self._take_with_is_copy(indexer, axis=0)
def _getitem_multilevel(self, key):
# self.columns is a MultiIndex
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(
new_values, index=self.index, columns=result_columns
)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == "":
result = result[""]
if isinstance(result, Series):
result = self._constructor_sliced(
result, index=self.index, name=key
)
result._set_is_copy(self)
return result
else:
# loc is neither a slice nor ndarray, so must be an int
return self._ixs(loc, axis=1)
def _get_value(self, index, col, takeable: bool = False):
"""
Quickly retrieve single value at passed column and index.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
scalar
"""
if takeable:
series = self._ixs(col, axis=1)
return series._values[index]
series = self._get_item_cache(col)
engine = self.index._engine
try:
loc = engine.get_loc(index)
return series._values[loc]
except KeyError:
# GH 20629
if self.index.nlevels > 1:
# partial indexing forbidden
raise
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key: slice, value):
# NB: we can't just use self.loc[key] = value because that
# operates on labels and we need to operate positional for
# backwards-compat, xref GH#31469
self._check_setitem_copy()
self.iloc._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}!"
)
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.iloc._setitem_with_indexer(indexer, value)
else:
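# key is a sequence of column labels (not a boolean mask); align and
# assign column by column, creating any missing columns as needed.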
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError("Columns must be same length as key")
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
self.loc._ensure_listlike_indexer(key, axis=1, value=value)
indexer = self.loc._get_listlike_indexer(
key, axis=1, raise_missing=False
)[1]
self._check_setitem_copy()
self.iloc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
key = self._constructor(key, **self._construct_axes_dict())
if key.size and not is_bool_dtype(key.values):
raise TypeError(
"Must pass DataFrame or 2-d ndarray with boolean values only"
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _iset_item(self, loc: int, value):
self._ensure_valid_index(value)
# technically _sanitize_column expects a label, not a position,
# but the behavior is the same as long as we pass broadcast=False
value = self._sanitize_column(loc, value, broadcast=False)
NDFrame._iset_item(self, loc, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If the value is a numpy array (not a Series), it must be the
same length as the DataFrame's index or an error will be raised.
A Series will be conformed to the DataFrame's index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def _set_value(self, index, col, value, takeable: bool = False):
"""
Put single value at passed column and index.
Parameters
----------
index : row label
col : column label
value : scalar
takeable : interpret the index/col as indexers, default False
"""
try:
if takeable is True:
series = self._ixs(col, axis=1)
series._set_value(index, value, takeable=True)
return
series = self._get_item_cache(col)
engine = self.index._engine
loc = engine.get_loc(index)
validate_numeric_casting(series.dtype, value)
series._values[loc] = value
# Note: trying to use series._set_value breaks tests in
# tests.frame.indexing.test_indexing and tests.indexing.test_partial
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
if takeable:
self.iloc[index, col] = value
else:
self.loc[index, col] = value
self._item_cache.pop(col, None)
def _ensure_valid_index(self, value):
"""
Ensure that if we don't have an index, that we can create one from the
passed value.
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value) and len(value):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError) as err:
raise ValueError(
"Cannot set a frame with no defined index "
"and a value that cannot be converted to a Series"
) from err
# GH31368 preserve name of index
index_copy = value.index.copy()
if self.index.name is not None:
index_copy.name = self.index.name
self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan)
def _box_col_values(self, values, loc: int) -> Series:
"""
Provide boxed values for a column.
"""
# Lookup in columns so that if e.g. a str datetime was passed
# we attach the Timestamp object as the name.
name = self.columns[loc]
klass = self._constructor_sliced
return klass(values, index=self.index, name=name, fastpath=True)
# ----------------------------------------------------------------------
# Unsorted
def query(self, expr, inplace=False, **kwargs):
"""
Query the columns of a DataFrame with a boolean expression.
Parameters
----------
expr : str
The query string to evaluate.
You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
You can refer to column names that are not valid Python variable names
by surrounding them in backticks. Thus, column names containing spaces
or punctuation (besides underscores) or starting with digits must be
surrounded by backticks. (For example, a column named "Area (cm^2)" would
be referenced as `Area (cm^2)`). Column names which are Python keywords
(like "list", "for", "import", etc.) cannot be used.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
.. versionadded:: 0.25.0
Backtick quoting introduced.
.. versionadded:: 1.0.0
Expanding functionality of backtick quoting for more than only spaces.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
DataFrame or None
DataFrame resulting from the provided query expression or
None if ``inplace=True``.
See Also
--------
eval : Evaluate a string describing operations on
DataFrame columns.
DataFrame.eval : Evaluate a string describing operations on
DataFrame columns.
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
*Backtick quoted variables*
Backtick quoted variables are parsed as literal Python code and
are converted internally to a valid Python identifier.
This can lead to the following problems.
During parsing a number of disallowed characters inside the backtick
quoted string are replaced by strings that are allowed as a Python identifier.
These characters include all operators in Python, the space character, the
question mark, the exclamation mark, the dollar sign, and the euro sign.
For other characters that fall outside the ASCII range (U+0001..U+007F)
and those that are not further specified in PEP 3131,
the query parser will raise an error.
This excludes whitespace other than the space character, as well as
the hashtag (as it is used for comments) and the backtick
itself (the backtick cannot be escaped).
In a special case, quotes that make a pair around a backtick can
confuse the parser.
For example, ```it's` > `that's``` will raise an error,
as it forms a quoted string (``'s > `that'``) with a backtick inside.
See also the Python documentation about lexical analysis
(https://docs.python.org/3/reference/lexical_analysis.html)
in combination with the source code in :mod:`pandas.core.computation.parsing`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not isinstance(expr, str):
msg = f"expr must be a string to be evaluated, {type(expr)} given"
raise ValueError(msg)
kwargs["level"] = kwargs.pop("level", 0) + 1
kwargs["target"] = None
res = self.eval(expr, **kwargs)
try:
result = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
result = self[res]
if inplace:
self._update_inplace(result)
else:
return result
def eval(self, expr, inplace=False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, pandas object, or None
The result of the evaluation or None if ``inplace=True``.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
Multiple columns can be assigned to using multi-line expressions:
>>> df.eval(
... '''
... C = A + B
... D = A - B
... '''
... )
A B C D
0 1 10 11 -9
1 2 8 10 -6
2 3 6 9 -3
3 4 4 8 0
4 5 2 7 3
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, "inplace")
resolvers = kwargs.pop("resolvers", None)
kwargs["level"] = kwargs.pop("level", 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
column_resolvers = self._get_cleaned_column_resolvers()
resolvers = column_resolvers, index_resolvers
if "target" not in kwargs:
kwargs["target"] = self
kwargs["resolvers"] = kwargs.get("resolvers", ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None) -> DataFrame:
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
See Also
--------
DataFrame.dtypes: Return Series with the data type of each column.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<https://numpy.org/doc/stable/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int64'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = (frozenset(include), frozenset(exclude))
if not any(selection):
raise ValueError("at least one of include or exclude must be nonempty")
# convert the myriad valid dtypes object to a single representation
include = frozenset(infer_dtype_from_object(x) for x in include)
exclude = frozenset(infer_dtype_from_object(x) for x in exclude)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError(f"include and exclude overlap on {(include & exclude)}")
# We raise when both include and exclude are empty
# Hence, we can just shrink the columns we want to keep
keep_these = np.full(self.shape[1], True)
def extract_unique_dtypes_from_dtypes_set(
dtypes_set: FrozenSet[Dtype], unique_dtypes: np.ndarray
) -> List[Dtype]:
extracted_dtypes = [
unique_dtype
for unique_dtype in unique_dtypes
# error: Argument 1 to "tuple" has incompatible type
# "FrozenSet[Union[ExtensionDtype, str, Any, Type[str],
# Type[float], Type[int], Type[complex], Type[bool]]]";
# expected "Iterable[Union[type, Tuple[Any, ...]]]"
if issubclass(
unique_dtype.type, tuple(dtypes_set) # type: ignore[arg-type]
)
]
return extracted_dtypes
unique_dtypes = self.dtypes.unique()
if include:
included_dtypes = extract_unique_dtypes_from_dtypes_set(
include, unique_dtypes
)
keep_these &= self.dtypes.isin(included_dtypes)
if exclude:
excluded_dtypes = extract_unique_dtypes_from_dtypes_set(
exclude, unique_dtypes
)
keep_these &= ~self.dtypes.isin(excluded_dtypes)
return self.iloc[:, keep_these.values]
def insert(self, loc, column, value, allow_duplicates=False) -> None:
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must satisfy 0 <= loc <= len(columns).
column : str, number, or hashable object
Label of the inserted column.
value : int, Series, or array-like
allow_duplicates : bool, optional
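Examples
--------
Insert a new column between two existing columns:
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.insert(1, "newcol", [99, 99])
>>> df
   col1  newcol  col2
0     1      99     3
1     2      99     4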
"""
if allow_duplicates and not self.flags.allows_duplicate_labels:
raise ValueError(
"Cannot specify 'allow_duplicates=True' when "
"'self.flags.allows_duplicate_labels' is False."
)
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates)
def assign(self, **kwargs) -> DataFrame:
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change the input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
Later items in '\*\*kwargs' may refer to newly created or modified
columns in 'df'; items are computed and assigned into 'df' in order.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
You can create multiple columns within the same assign where one
of the columns depends on another one defined within the same assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
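# Evaluate the kwargs in order on a copy of the frame so that later
# items can refer to columns created by earlier ones.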
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
return data
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
numpy.ndarray
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except ValueError as err:
# raised in MultiIndex.from_tuples, see test_insert_error_msmgs
if not value.index.is_unique:
# duplicate axis
raise err
# other
raise TypeError(
"incompatible index of inserted column with frame index"
) from err
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, ExtensionArray):
# Explicitly copy here, instead of in sanitize_index,
# as sanitize_index won't copy an EA, even with copy=True
value = value.copy()
value = sanitize_index(value, self.index)
elif isinstance(value, Index) or is_sequence(value):
# turn me into an ndarray
value = sanitize_index(value, self.index)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com.asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# cast ignores pandas dtypes. so save the dtype first
infer_dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)
# upcast
if is_extension_array_dtype(infer_dtype):
value = construct_1d_arraylike_from_scalar(
value, len(self.index), infer_dtype
)
else:
# pandas\core\frame.py:3827: error: Argument 1 to
# "cast_scalar_to_array" has incompatible type "int"; expected
# "Tuple[Any, ...]" [arg-type]
value = cast_scalar_to_array(
len(self.index), value # type: ignore[arg-type]
)
value = maybe_cast_to_datetime(value, infer_dtype)
# return internal types directly
if is_extension_array_dtype(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if not self.columns.is_unique or isinstance(self.columns, MultiIndex):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
@property
def _series(self):
return {
item: Series(
self._mgr.iget(idx), index=self.index, name=item, fastpath=True
)
for idx, item in enumerate(self.columns)
}
def lookup(self, row_labels, col_labels) -> np.ndarray:
"""
Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
.. deprecated:: 1.2.0
DataFrame.lookup is deprecated,
use DataFrame.melt and DataFrame.loc instead.
For an example see :meth:`~pandas.DataFrame.lookup`
in the user guide.
Parameters
----------
row_labels : sequence
The row labels to use for lookup.
col_labels : sequence
The column labels to use for lookup.
Returns
-------
numpy.ndarray
The found values.
"""
msg = (
"The 'lookup' method is deprecated and will be "
"removed in a future version. "
"You can use DataFrame.melt and DataFrame.loc "
"as a substitute."
)
warnings.warn(msg, FutureWarning, stacklevel=2)
n = len(row_labels)
if n != len(col_labels):
raise ValueError("Row labels must have same size as column labels")
if not (self.index.is_unique and self.columns.is_unique):
# GH#33041
raise ValueError("DataFrame.lookup requires unique index and columns")
thresh = 1000
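# Fast path: for homogeneous frames (or a large number of lookups) gather
# the values with a flat index into ``.values``; otherwise fall back to
# per-element label lookups, which preserves column dtypes.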
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError("One or more row labels was not found")
if (cidx == -1).any():
raise KeyError("One or more column labels was not found")
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype="O")
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):
frame = self
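# Reindex the columns first and then the index, threading the
# intermediate result through each step.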
columns = axes["columns"]
if columns is not None:
frame = frame._reindex_columns(
columns, method, copy, level, fill_value, limit, tolerance
)
index = axes["index"]
if index is not None:
frame = frame._reindex_index(
index, method, copy, level, fill_value, limit, tolerance
)
return frame
def _reindex_index(
self,
new_index,
method,
copy,
level,
fill_value=np.nan,
limit=None,
tolerance=None,
):
new_index, indexer = self.index.reindex(
new_index, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{0: [new_index, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_columns(
self,
new_columns,
method,
copy,
level,
fill_value=None,
limit=None,
tolerance=None,
):
new_columns, indexer = self.columns.reindex(
new_columns, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{1: [new_columns, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_multi(self, axes, copy, fill_value) -> DataFrame:
"""
We are guaranteed non-Nones in the axes.
"""
new_index, row_indexer = self.index.reindex(axes["index"])
new_columns, col_indexer = self.columns.reindex(axes["columns"])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(
self.values, indexer, fill_value=fill_value
)
return self._constructor(new_values, index=new_index, columns=new_columns)
else:
return self._reindex_with_indexers(
{0: [new_index, row_indexer], 1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value,
)
@doc(NDFrame.align, **_shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
) -> DataFrame:
return super().align(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
Change the row labels.
>>> df.set_axis(['a', 'b', 'c'], axis='index')
A B
a 1 4
b 2 5
c 3 6
Change the column labels.
>>> df.set_axis(['I', 'II'], axis='columns')
I II
0 1 4
1 2 5
2 3 6
Now, update the labels inplace.
>>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
>>> df
i ii
0 1 4
1 2 5
2 3 6
"""
)
@Substitution(
**_shared_doc_kwargs,
extended_summary_sub=" column or",
axis_description_sub=", and 1 identifies the columns",
see_also_sub=" or columns",
)
@Appender(NDFrame.set_axis.__doc__)
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
return super().set_axis(labels, axis=axis, inplace=inplace)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
@rewrite_axis_style_signature(
"labels",
[
("method", None),
("copy", True),
("level", None),
("fill_value", np.nan),
("limit", None),
("tolerance", None),
],
)
def reindex(self, *args, **kwargs) -> DataFrame:
axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex")
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop("axis", None)
kwargs.pop("labels", None)
return super().reindex(**kwargs)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
is equivalent to ``index=labels``).
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If False, return a copy. Otherwise, do operation
inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
DataFrame or None
DataFrame without the removed index or column labels or
None if ``inplace=True``.
Raises
------
KeyError
If any of the labels is not found in the selected axis.
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Series.drop : Return Series with specified index labels removed.
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return super().drop(
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
@rewrite_axis_style_signature(
"mapper",
[("copy", True), ("inplace", False), ("level", None), ("errors", "ignore")],
)
def rename(
self,
mapper: Optional[Renamer] = None,
*,
index: Optional[Renamer] = None,
columns: Optional[Renamer] = None,
axis: Optional[Axis] = None,
copy: bool = True,
inplace: bool = False,
level: Optional[Level] = None,
errors: str = "ignore",
) -> Optional[DataFrame]:
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper : dict-like or function
Dict-like or function transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
index : dict-like or function
Alternative to specifying axis (``mapper, axis=0``
is equivalent to ``index=mapper``).
columns : dict-like or function
Alternative to specifying axis (``mapper, axis=1``
is equivalent to ``columns=mapper``).
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
DataFrame or None
DataFrame with the renamed axis labels or None if ``inplace=True``.
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
DataFrame.rename_axis : Set the name of the axis.
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Rename columns using a mapping:
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
Rename index using a mapping:
>>> df.rename(index={0: "x", 1: "y", 2: "z"})
A B
x 1 4
y 2 5
z 3 6
Cast index labels to a different type:
>>> df.index
RangeIndex(start=0, stop=3, step=1)
>>> df.rename(index=str).index
Index(['0', '1', '2'], dtype='object')
>>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise")
Traceback (most recent call last):
KeyError: ['C'] not found in axis
Using axis-style parameters:
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
return super().rename(
mapper=mapper,
index=index,
columns=columns,
axis=axis,
copy=copy,
inplace=inplace,
level=level,
errors=errors,
)
@doc(NDFrame.fillna, **_shared_doc_kwargs)
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
) -> Optional[DataFrame]:
return super().fillna(
value=value,
method=method,
axis=axis,
inplace=inplace,
limit=limit,
downcast=downcast,
)
def pop(self, item: Label) -> Series:
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : label
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
return super().pop(item=item)
@doc(NDFrame.replace, **_shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
return super().replace(
to_replace=to_replace,
value=value,
inplace=inplace,
limit=limit,
regex=regex,
method=method,
)
def _replace_columnwise(
self, mapping: Dict[Label, Tuple[Any, Any]], inplace: bool, regex
):
"""
Dispatch to Series.replace column-wise.
Parameters
----------
mapping : dict
of the form {col: (target, value)}
inplace : bool
regex : bool or same types as `to_replace` in DataFrame.replace
Returns
-------
DataFrame or None
"""
# Operate column-wise
res = self if inplace else self.copy()
ax = self.columns
for i in range(len(ax)):
if ax[i] in mapping:
ser = self.iloc[:, i]
target, value = mapping[ax[i]]
newobj = ser.replace(target, value, regex=regex)
res.iloc[:, i] = newobj
if inplace:
return
return res.__finalize__(self)
@doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"])
def shift(
self, periods=1, freq=None, axis=0, fill_value=lib.no_default
) -> DataFrame:
axis = self._get_axis_number(axis)
ncols = len(self.columns)
if axis == 1 and periods != 0 and fill_value is lib.no_default and ncols > 0:
# We will infer fill_value to match the closest column
if periods > 0:
result = self.iloc[:, :-periods]
for col in range(min(ncols, abs(periods))):
# TODO(EA2D): doing this in a loop unnecessary with 2D EAs
# Define filler inside loop so we get a copy
filler = self.iloc[:, 0].shift(len(self))
result.insert(0, col, filler, allow_duplicates=True)
else:
result = self.iloc[:, -periods:]
for col in range(min(ncols, abs(periods))):
# Define filler inside loop so we get a copy
filler = self.iloc[:, -1].shift(len(self))
result.insert(
len(result.columns), col, filler, allow_duplicates=True
)
result.columns = self.columns.copy()
return result
return super().shift(
periods=periods, freq=freq, axis=axis, fill_value=fill_value
)
def set_index(
self, keys, drop=True, append=False, inplace=False, verify_integrity=False
):
"""
Set the DataFrame index using existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and
instances of :class:`~collections.abc.Iterator`.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
If True, modifies the DataFrame in place (do not create a new object).
verify_integrity : bool, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method.
Returns
-------
DataFrame or None
Changed row labels or None if ``inplace=True``.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]})
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month')
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a MultiIndex using an Index and a column:
>>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Create a MultiIndex using two Series:
>>> s = pd.Series([1, 2, 3, 4])
>>> df.set_index([s, s**2])
month year sale
1 1 1 2012 55
2 4 4 2014 40
3 9 7 2013 84
4 16 10 2014 31
"""
inplace = validate_bool_kwarg(inplace, "inplace")
self._check_inplace_and_allows_duplicate_labels(inplace)
if not isinstance(keys, list):
keys = [keys]
err_msg = (
'The parameter "keys" may be a column key, one-dimensional '
"array, or a list containing only valid column keys and "
"one-dimensional arrays."
)
missing: List[Label] = []
for col in keys:
if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)):
# arrays are fine as long as they are one-dimensional
# iterators get converted to list below
if getattr(col, "ndim", 1) != 1:
raise ValueError(err_msg)
else:
# everything else gets tried as a key; see GH 24969
try:
found = col in self.columns
except TypeError as err:
raise TypeError(
f"{err_msg}. Received column of type {type(col)}"
) from err
else:
if not found:
missing.append(col)
if missing:
raise KeyError(f"None of {missing} are in the columns")
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names: List[Label] = []
if append:
names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove: List[Label] = []
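# Collect the arrays that will make up the new index, remembering which
# columns should be dropped from the frame afterwards.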
for col in keys:
if isinstance(col, MultiIndex):
for n in range(col.nlevels):
arrays.append(col._get_level_values(n))
names.extend(col.names)
elif isinstance(col, (Index, Series)):
# if Index then not MultiIndex (treated above)
arrays.append(col)
names.append(col.name)
elif isinstance(col, (list, np.ndarray)):
arrays.append(col)
names.append(None)
elif isinstance(col, abc.Iterator):
arrays.append(list(col))
names.append(None)
# from here, col can only be a column label
else:
arrays.append(frame[col]._values)
names.append(col)
if drop:
to_remove.append(col)
if len(arrays[-1]) != len(self):
# check newest element against length of calling frame, since
# ensure_index_from_sequences would not raise for append=False.
raise ValueError(
f"Length mismatch: Expected {len(self)} rows, "
f"received array of length {len(arrays[-1])}"
)
index = ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index[index.duplicated()].unique()
raise ValueError(f"Index has duplicate keys: {duplicates}")
# use set to handle duplicate column names gracefully in case of drop
for c in set(to_remove):
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
@overload
# https://github.com/python/mypy/issues/6580
# Overloaded function signatures 1 and 2 overlap with incompatible return types
def reset_index( # type: ignore[misc]
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = ...,
drop: bool = ...,
inplace: Literal[False] = ...,
col_level: Hashable = ...,
col_fill: Label = ...,
) -> DataFrame:
...
@overload
def reset_index(
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = ...,
drop: bool = ...,
inplace: Literal[True] = ...,
col_level: Hashable = ...,
col_fill: Label = ...,
) -> None:
...
def reset_index(
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = None,
drop: bool = False,
inplace: bool = False,
col_level: Hashable = 0,
col_fill: Label = "",
) -> Optional[DataFrame]:
"""
Reset the index, or a level of it.
Reset the index of the DataFrame, and use the default one instead.
If the DataFrame has a MultiIndex, this method can remove one or more
levels.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame or None
DataFrame with the new index or None if ``inplace=True``.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, "inplace")
self._check_inplace_and_allows_duplicate_labels(inplace)
if inplace:
new_obj = self
else:
new_obj = self.copy()
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
to_insert: Iterable[Tuple[Any, Optional[Any]]]
if isinstance(self.index, MultiIndex):
names = [
(n if n is not None else f"level_{i}")
for i, n in enumerate(self.index.names)
]
to_insert = zip(self.index.levels, self.index.codes)
else:
default = "index" if "index" not in self else "level_0"
names = [default] if self.index.name is None else [self.index.name]
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
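# Walk the index levels in reverse and insert each at column position 0 so
# the final column order matches the original level order. With MultiIndex
# columns, `col_fill` pads the inserted column's name out to the full number
# of column levels around the requested `col_level`.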
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = list(name) if isinstance(name, tuple) else [name]
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError(
"col_fill=None is incompatible "
f"with incomplete column name {name}"
)
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
return None
# ----------------------------------------------------------------------
# Reindex-based selection methods
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isna(self) -> DataFrame:
result = self._constructor(self._mgr.isna(func=isna))
return result.__finalize__(self, method="isna")
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isnull(self) -> DataFrame:
return self.isna()
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
def notna(self) -> DataFrame:
return ~self.isna()
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
def notnull(self) -> DataFrame:
return ~self.isna()
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
"""
Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
* 1, or 'columns' : Drop columns which contain missing values.
.. versionchanged:: 1.0.0
Pass tuple or list to drop on multiple axes.
Only a single axis is allowed.
how : {'any', 'all'}, default 'any'
Determine whether a row or column is removed from the DataFrame when it
has at least one NA or all NA values.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame or None
DataFrame with NA entries dropped from it or None if ``inplace=True``.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'toy'])
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if isinstance(axis, (tuple, list)):
# GH20987
raise TypeError("supplying multiple axes to axis is no longer supported.")
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
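# Keep a label when its non-NA count meets the criterion: an explicit
# `thresh`, a full count for how='any' (no NAs at all), or a non-zero count
# for how='all' (at least one non-NA value).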
if thresh is not None:
mask = count >= thresh
elif how == "any":
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == "all":
mask = count > 0
else:
if how is not None:
raise ValueError(f"invalid how option: {how}")
else:
raise TypeError("must specify how or thresh")
result = self.loc(axis=axis)[mask]
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
inplace: bool = False,
ignore_index: bool = False,
) -> Optional[DataFrame]:
"""
Return DataFrame with duplicate rows removed.
Considering certain columns is optional. Indexes, including time indexes,
are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : bool, default False
Whether to drop duplicates in place or to return a copy.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
DataFrame or None
DataFrame with duplicates removed or None if ``inplace=True``.
See Also
--------
DataFrame.value_counts: Count unique combinations of columns.
Examples
--------
Consider a dataset containing ramen ratings.
>>> df = pd.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, it removes duplicate rows based on all columns.
>>> df.drop_duplicates()
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
To remove duplicates on specific column(s), use ``subset``.
>>> df.drop_duplicates(subset=['brand'])
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
To remove duplicates and keep last occurrences, use ``keep``.
>>> df.drop_duplicates(subset=['brand', 'style'], keep='last')
brand style rating
1 Yum Yum cup 4.0
2 Indomie cup 3.5
4 Indomie pack 5.0
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, "inplace")
ignore_index = validate_bool_kwarg(ignore_index, "ignore_index")
duplicated = self.duplicated(subset, keep=keep)
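# Unary minus on the boolean Series acts as logical NOT here, so the
# selection below keeps only the rows that are not flagged as duplicates.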
result = self[-duplicated]
if ignore_index:
result.index = ibase.default_index(len(result))
if inplace:
self._update_inplace(result)
return None
else:
return result
def duplicated(
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
) -> Series:
"""
Return boolean Series denoting duplicate rows.
Considering certain columns is optional.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to mark.
- ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
Series
Boolean series indicating duplicate rows.
See Also
--------
Index.duplicated : Equivalent method on index.
Series.duplicated : Equivalent method on Series.
Series.drop_duplicates : Remove duplicate values from Series.
DataFrame.drop_duplicates : Remove duplicate values from DataFrame.
Examples
--------
Consider a dataset containing ramen ratings.
>>> df = pd.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, for each set of duplicated values, the first occurrence
is set to False and all others to True.
>>> df.duplicated()
0 False
1 True
2 False
3 False
4 False
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
is set to False and all others to True.
>>> df.duplicated(keep='last')
0 True
1 False
2 False
3 False
4 False
dtype: bool
By setting ``keep`` to False, all duplicates are marked True.
>>> df.duplicated(keep=False)
0 True
1 True
2 False
3 False
4 False
dtype: bool
To find duplicates on specific column(s), use ``subset``.
>>> df.duplicated(subset=['brand'])
0 False
1 True
2 False
3 True
4 True
dtype: bool
"""
from pandas._libs.hashtable import SIZE_HINT_LIMIT, duplicated_int64
if self.empty:
return self._constructor_sliced(dtype=bool)
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), SIZE_HINT_LIMIT)
)
return labels.astype("i8", copy=False), len(shape)
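# Each subset column is factorized to integer codes; get_group_index then
# combines the per-column codes into a single group id per row, and
# duplicated_int64 flags repeats of that id according to `keep`.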
if subset is None:
subset = self.columns
elif (
not np.iterable(subset)
or isinstance(subset, str)
or isinstance(subset, tuple)
and subset in self.columns
):
subset = (subset,)
# needed for mypy since can't narrow types using np.iterable
subset = cast(Iterable, subset)
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.items() if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
result = self._constructor_sliced(duplicated_int64(ids, keep), index=self.index)
return result.__finalize__(self, method="duplicated")
# ----------------------------------------------------------------------
# Sorting
# TODO: Just move the sort_values doc here.
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_values.__doc__)
# error: Signature of "sort_values" incompatible with supertype "NDFrame"
def sort_values( # type: ignore[override]
self,
by,
axis=0,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
ignore_index=False,
key: ValueKeyFunc = None,
):
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError(
f"Length of ascending ({len(ascending)}) != length of by ({len(by)})"
)
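# Multiple sort keys go through lexsort_indexer; a single key uses nargsort.
# Keys are rewrapped as Series only when a `key` function must be applied.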
if len(by) > 1:
keys = [self._get_label_or_level_values(x, axis=axis) for x in by]
# need to rewrap columns in Series to apply key function
if key is not None:
keys = [Series(k, name=name) for (k, name) in zip(keys, by)]
indexer = lexsort_indexer(
keys, orders=ascending, na_position=na_position, key=key
)
indexer = ensure_platform_int(indexer)
else:
by = by[0]
k = self._get_label_or_level_values(by, axis=axis)
# need to rewrap column in Series to apply key function
if key is not None:
k = Series(k, name=by)
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(
k, kind=kind, ascending=ascending, na_position=na_position, key=key
)
new_data = self._mgr.take(
indexer, axis=self._get_block_manager_axis(axis), verify=False
)
if ignore_index:
new_data.axes[1] = ibase.default_index(len(indexer))
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="sort_values")
def sort_index(
self,
axis=0,
level=None,
ascending: bool = True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
sort_remaining: bool = True,
ignore_index: bool = False,
key: IndexKeyFunc = None,
):
"""
Sort object by labels (along an axis).
Returns a new DataFrame sorted by label if `inplace` argument is
``False``, otherwise updates the original DataFrame and returns None.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
ascending : bool or list of bools, default True
Sort ascending vs. descending. When the index is a MultiIndex the
sort direction can be controlled for each level individually.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
key : callable, optional
If not None, apply the key function to the index values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect an
``Index`` and return an ``Index`` of the same shape. For MultiIndex
inputs, the key is applied *per level*.
.. versionadded:: 1.1.0
Returns
-------
DataFrame or None
The original DataFrame sorted by the labels or None if ``inplace=True``.
See Also
--------
Series.sort_index : Sort Series by the index.
DataFrame.sort_values : Sort DataFrame by the value.
Series.sort_values : Sort Series by the value.
Examples
--------
>>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150],
... columns=['A'])
>>> df.sort_index()
A
1 4
29 2
100 1
150 5
234 3
By default, it sorts in ascending order; to sort in descending order,
use ``ascending=False``.
>>> df.sort_index(ascending=False)
A
234 3
150 5
100 1
29 2
1 4
A key function can be specified which is applied to the index before
sorting. For a ``MultiIndex`` this is applied to each level separately.
>>> df = pd.DataFrame({"a": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd'])
>>> df.sort_index(key=lambda x: x.str.lower())
a
A 1
b 2
C 3
d 4
"""
return super().sort_index(
axis,
level,
ascending,
inplace,
kind,
na_position,
sort_remaining,
ignore_index,
key,
)
def value_counts(
self,
subset: Optional[Sequence[Label]] = None,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
):
"""
Return a Series containing counts of unique rows in the DataFrame.
.. versionadded:: 1.1.0
Parameters
----------
subset : list-like, optional
Columns to use when counting unique combinations.
normalize : bool, default False
Return proportions rather than frequencies.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
Returns
-------
Series
See Also
--------
Series.value_counts: Equivalent method on Series.
Notes
-----
The returned Series will have a MultiIndex with one level per input
column. By default, rows that contain any NA values are omitted from
the result. By default, the resulting Series will be in descending
order so that the first element is the most frequently-occurring row.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6],
... 'num_wings': [2, 0, 0, 0]},
... index=['falcon', 'dog', 'cat', 'ant'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
cat 4 0
ant 6 0
>>> df.value_counts()
num_legs num_wings
4 0 2
2 2 1
6 0 1
dtype: int64
>>> df.value_counts(sort=False)
num_legs num_wings
2 2 1
4 0 2
6 0 1
dtype: int64
>>> df.value_counts(ascending=True)
num_legs num_wings
2 2 1
6 0 1
4 0 2
dtype: int64
>>> df.value_counts(normalize=True)
num_legs num_wings
4 0 0.50
2 2 0.25
6 0 0.25
dtype: float64
"""
if subset is None:
subset = self.columns.tolist()
counts = self.groupby(subset).grouper.size()
if sort:
counts = counts.sort_values(ascending=ascending)
if normalize:
counts /= counts.sum()
# Force MultiIndex for single column
if len(subset) == 1:
counts.index = MultiIndex.from_arrays(
[counts.index], names=[counts.index.name]
)
return counts
def nlargest(self, n, columns, keep="first") -> DataFrame:
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : prioritize the first occurrence(s)
- ``last`` : prioritize the last occurrence(s)
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
>>> df.nlargest(3, 'population')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'population', keep='last')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'population', keep='all')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.
>>> df.nlargest(3, ['population', 'GDP'])
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
"""
return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()
def nsmallest(self, n, columns, keep="first") -> DataFrame:
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 337000,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 337000 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "population".
>>> df.nsmallest(3, 'population')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'population', keep='last')
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
Nauru 337000 182 NR
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'population', keep='all')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
Nauru 337000 182 NR
To order by the smallest values in column "population" and then "GDP", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['population', 'GDP'])
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Nauru 337000 182 NR
"""
return algorithms.SelectNFrame(
self, n=n, keep=keep, columns=columns
).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0) -> DataFrame:
"""
Swap levels i and j in a MultiIndex on a particular axis.
Parameters
----------
i, j : int or str
Levels of the indices to be swapped. Can pass level name as string.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to swap levels on. 0 or 'index' for row-wise, 1 or
'columns' for column-wise.
Returns
-------
DataFrame
"""
result = self.copy()
axis = self._get_axis_number(axis)
if not isinstance(result._get_axis(axis), MultiIndex): # pragma: no cover
raise TypeError("Can only swap levels on a hierarchical axis.")
if axis == 0:
assert isinstance(result.index, MultiIndex)
result.index = result.index.swaplevel(i, j)
else:
assert isinstance(result.columns, MultiIndex)
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0) -> DataFrame:
"""
Rearrange index levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : {0 or 'index', 1 or 'columns'}, default 0
Where to reorder levels.
Returns
-------
DataFrame
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover
raise TypeError("Can only reorder levels on a hierarchical axis.")
result = self.copy()
if axis == 0:
assert isinstance(result.index, MultiIndex)
result.index = result.index.reorder_levels(order)
else:
assert isinstance(result.columns, MultiIndex)
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic Methods
def _cmp_method(self, other, op):
axis = 1 # only relevant for Series other case
self, other = ops.align_method_FRAME(self, other, axis, flex=False, level=None)
# See GH#4537 for discussion of scalar op behavior
new_data = self._dispatch_frame_op(other, op, axis=axis)
return self._construct_result(new_data)
def _arith_method(self, other, op):
if ops.should_reindex_frame_op(self, other, op, 1, 1, None, None):
return ops.frame_arith_method_with_reindex(self, other, op)
axis = 1 # only relevant for Series other case
self, other = ops.align_method_FRAME(self, other, axis, flex=True, level=None)
new_data = self._dispatch_frame_op(other, op, axis=axis)
return self._construct_result(new_data)
_logical_method = _arith_method
def _dispatch_frame_op(self, right, func, axis: Optional[int] = None):
"""
Evaluate the frame operation func(left, right) by evaluating
column-by-column, dispatching to the Series implementation.
Parameters
----------
right : scalar, Series, or DataFrame
func : arithmetic or comparison operator
axis : {None, 0, 1}
Returns
-------
DataFrame
"""
# Get the appropriate array-op to apply to each column/block's values.
array_op = ops.get_array_op(func)
right = lib.item_from_zerodim(right)
if not is_list_like(right):
# i.e. scalar, faster than checking np.ndim(right) == 0
bm = self._mgr.apply(array_op, right=right)
return type(self)(bm)
elif isinstance(right, DataFrame):
assert self.index.equals(right.index)
assert self.columns.equals(right.columns)
# TODO: The previous assertion `assert right._indexed_same(self)`
# fails in cases with empty columns reached via
# _frame_arith_method_with_reindex
bm = self._mgr.operate_blockwise(right._mgr, array_op)
return type(self)(bm)
elif isinstance(right, Series) and axis == 1:
# axis=1 means we want to operate row-by-row
assert right.index.equals(self.columns)
right = right._values
# maybe_align_as_frame ensures we do not have an ndarray here
assert not isinstance(right, np.ndarray)
arrays = [
array_op(_left, _right)
for _left, _right in zip(self._iter_column_arrays(), right)
]
elif isinstance(right, Series):
assert right.index.equals(self.index) # Handle other cases later
right = right._values
arrays = [array_op(left, right) for left in self._iter_column_arrays()]
else:
# Remaining cases have less-obvious dispatch rules
raise NotImplementedError(right)
return type(self)._from_arrays(
arrays, self.columns, self.index, verify_integrity=False
)
def _combine_frame(self, other: DataFrame, func, fill_value=None):
# at this point we have `self._indexed_same(other)`
if fill_value is None:
# since _arith_op may be called in a loop, avoid function call
# overhead if possible by doing this check once
_arith_op = func
else:
def _arith_op(left, right):
# for the mixed_type case where we iterate over columns,
# _arith_op(left, right) is equivalent to
# left._binop(right, func, fill_value=fill_value)
left, right = ops.fill_binop(left, right, fill_value)
return func(left, right)
new_data = self._dispatch_frame_op(other, _arith_op)
return new_data
def _construct_result(self, result) -> DataFrame:
"""
Wrap the result of an arithmetic, comparison, or logical operation.
Parameters
----------
result : DataFrame
Returns
-------
DataFrame
"""
out = self._constructor(result, copy=False)
# Pin columns instead of passing to constructor for compat with
# non-unique columns case
out.columns = self.columns
out.index = self.index
return out
def __divmod__(self, other) -> Tuple[DataFrame, DataFrame]:
# Naive implementation, room for optimization
div = self // other
mod = self - div * other
return div, mod
def __rdivmod__(self, other) -> Tuple[DataFrame, DataFrame]:
# Naive implementation, room for optimization
div = other // self
mod = other - div * self
return div, mod
# ----------------------------------------------------------------------
# Combination-Related
@doc(
_shared_docs["compare"],
"""
Returns
-------
DataFrame
DataFrame that shows the differences stacked side by side.
The resulting index will be a MultiIndex with 'self' and 'other'
stacked alternately at the inner level.
Raises
------
ValueError
When the two DataFrames don't have identical labels or shape.
See Also
--------
Series.compare : Compare with another Series and show differences.
DataFrame.equals : Test whether two objects contain the same elements.
Notes
-----
Matching NaNs will not appear as a difference.
Can only compare identically-labeled
(i.e. same shape, identical row and column labels) DataFrames.
Examples
--------
>>> df = pd.DataFrame(
... {{
... "col1": ["a", "a", "b", "b", "a"],
... "col2": [1.0, 2.0, 3.0, np.nan, 5.0],
... "col3": [1.0, 2.0, 3.0, 4.0, 5.0]
... }},
... columns=["col1", "col2", "col3"],
... )
>>> df
col1 col2 col3
0 a 1.0 1.0
1 a 2.0 2.0
2 b 3.0 3.0
3 b NaN 4.0
4 a 5.0 5.0
>>> df2 = df.copy()
>>> df2.loc[0, 'col1'] = 'c'
>>> df2.loc[2, 'col3'] = 4.0
>>> df2
col1 col2 col3
0 c 1.0 1.0
1 a 2.0 2.0
2 b 3.0 4.0
3 b NaN 4.0
4 a 5.0 5.0
Align the differences on columns
>>> df.compare(df2)
col1 col3
self other self other
0 a c NaN NaN
2 NaN NaN 3.0 4.0
Stack the differences on rows
>>> df.compare(df2, align_axis=0)
col1 col3
0 self a NaN
other c NaN
2 self NaN 3.0
other NaN 4.0
Keep the equal values
>>> df.compare(df2, keep_equal=True)
col1 col3
self other self other
0 a c 1.0 1.0
2 b b 3.0 4.0
Keep all original rows and columns
>>> df.compare(df2, keep_shape=True)
col1 col2 col3
self other self other self other
0 a c NaN NaN NaN NaN
1 NaN NaN NaN NaN NaN NaN
2 NaN NaN NaN NaN 3.0 4.0
3 NaN NaN NaN NaN NaN NaN
4 NaN NaN NaN NaN NaN NaN
Keep all original rows and columns and also all original values
>>> df.compare(df2, keep_shape=True, keep_equal=True)
col1 col2 col3
self other self other self other
0 a c 1.0 1.0 1.0 1.0
1 a a 2.0 2.0 2.0 2.0
2 b b 3.0 3.0 3.0 4.0
3 b b NaN NaN 4.0 4.0
4 a a 5.0 5.0 5.0 5.0
""",
klass=_shared_doc_kwargs["klass"],
)
def compare(
self,
other: DataFrame,
align_axis: Axis = 1,
keep_shape: bool = False,
keep_equal: bool = False,
) -> DataFrame:
return super().compare(
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
)
def combine(
self, other: DataFrame, func, fill_value=None, overwrite=True
) -> DataFrame:
"""
Perform column-wise combine with another DataFrame.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two Series as inputs and returns a Series or a
scalar. Used to merge the two DataFrames column by column.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : bool, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
DataFrame
Combination of the provided DataFrames.
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axis differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
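# For each column of the aligned frames: optionally fill NAs with
# `fill_value`, reconcile dtypes via find_common_type, apply `func`, and
# attempt to downcast the result back to the common dtype.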
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
# don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
if col not in self.columns:
# If self DataFrame does not have col in other DataFrame,
# try to promote the series, which is all NaN, to other_dtype.
new_dtype = other_dtype
try:
series = series.astype(new_dtype, copy=False)
except ValueError:
# e.g. new_dtype is integer types
pass
else:
# if we have different dtypes, possibly promote
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, new_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index, columns=new_columns)
def combine_first(self, other: DataFrame) -> DataFrame:
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
import pandas.core.computation.expressions as expressions
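# combiner: expressions.where picks values from `y` (other) wherever `x`
# (self) is NA, so nulls in the calling frame are patched column by column.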
def combiner(x, y):
mask = extract_array(isna(x))
x_values = extract_array(x, extract_numpy=True)
y_values = extract_array(y, extract_numpy=True)
# If the column y in other DataFrame is not in first DataFrame,
# just return y_values.
if y.name not in self.columns:
return y_values
return expressions.where(mask, y_values, x_values)
return self.combine(other, combiner, overwrite=False)
def update(
self, other, join="left", overwrite=True, filter_func=None, errors="ignore"
) -> None:
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged:: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`.
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-column(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != "left": # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ["ignore", "raise"]:
raise ValueError("The parameter errors must be either 'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
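# For each column, build a boolean mask of positions to keep from `self`
# (NA in `other` when overwrite=True, non-NA in `self` otherwise, or the
# complement of `filter_func`), then blend the two with expressions.where.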
for col in self.columns:
this = self[col]._values
that = other[col]._values
if filter_func is not None:
with np.errstate(all="ignore"):
mask = ~filter_func(this) | isna(that)
else:
if errors == "raise":
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that)
# ----------------------------------------------------------------------
# Data reshaping
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]})
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean()
Max Speed
Animal
Falcon 375.0
Parrot 25.0
**Hierarchical Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
... index=index)
>>> df
Max Speed
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
>>> df.groupby(level=0).mean()
Max Speed
Animal
Falcon 370.0
Parrot 25.0
>>> df.groupby(level="Type").mean()
Max Speed
Type
Captive 210.0
Wild 185.0
We can also choose to include or exclude NA in group keys by setting the
`dropna` parameter; the default setting is `True`:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = pd.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum()
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum()
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
>>> l = [["a", 12, 12], [None, 12.3, 33.], ["b", 12.3, 123], ["a", 1, 1]]
>>> df = pd.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by="a").sum()
b c
a
a 13.0 13.0
b 12.3 123.0
>>> df.groupby(by="a", dropna=False).sum()
b c
a
a 13.0 13.0
b 12.3 123.0
NaN 12.3 33.0
"""
)
@Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
def groupby(
self,
by=None,
axis=0,
level=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = no_default,
observed: bool = False,
dropna: bool = True,
) -> DataFrameGroupBy:
from pandas.core.groupby.generic import DataFrameGroupBy
if squeeze is not no_default:
warnings.warn(
(
"The `squeeze` parameter is deprecated and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=2,
)
else:
squeeze = False
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
return DataFrameGroupBy(
obj=self,
keys=by,
axis=axis,
level=level,
as_index=as_index,
sort=sort,
group_keys=group_keys,
squeeze=squeeze,
observed=observed,
dropna=dropna,
)
_shared_docs[
"pivot"
] = """
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation; multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------%s
index : str or object or a list of str, optional
Column to use to make new frame's index. If None, uses
existing index.
.. versionchanged:: 1.1.0
Also accept list of index names.
columns : str or object or a list of str
Column to use to make new frame's columns.
.. versionchanged:: 1.1.0
Also accept list of columns names.
values : str, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError
When there are any `index`, `columns` combinations with multiple
values. Use `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : Pivot based on the index values instead of a
column.
wide_to_long : Wide panel to long format. Less flexible but more
user-friendly than melt.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
You could also assign a list of column names or a list of index names.
>>> df = pd.DataFrame({
... "lev1": [1, 1, 1, 2, 2, 2],
... "lev2": [1, 1, 2, 1, 1, 2],
... "lev3": [1, 2, 1, 2, 1, 2],
... "lev4": [1, 2, 3, 4, 5, 6],
... "values": [0, 1, 2, 3, 4, 5]})
>>> df
lev1 lev2 lev3 lev4 values
0 1 1 1 1 0
1 1 1 2 2 1
2 1 2 1 3 2
3 2 1 2 4 3
4 2 1 1 5 4
5 2 2 2 6 5
>>> df.pivot(index="lev1", columns=["lev2", "lev3"],values="values")
lev2 1 2
lev3 1 2 1 2
lev1
1 0.0 1.0 2.0 NaN
2 4.0 3.0 NaN 5.0
>>> df.pivot(index=["lev1", "lev2"], columns=["lev3"],values="values")
lev3 1 2
lev1 lev2
1 1 0.0 1.0
2 2.0 NaN
2 1 4.0 3.0
2 NaN 5.0
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
@Substitution("")
@Appender(_shared_docs["pivot"])
def pivot(self, index=None, columns=None, values=None) -> DataFrame:
from pandas.core.reshape.pivot import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs[
"pivot_table"
] = """
Create a spreadsheet-style pivot table as a DataFrame.
The levels in the pivot table will be stored in MultiIndex objects
(hierarchical indexes) on the index and columns of the result DataFrame.
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
it is used in the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
it is used in the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
If dict is passed, the key is column to aggregate and value
is function or list of functions.
fill_value : scalar, default None
Value to replace missing values with (in the resulting pivot table,
after aggregation).
margins : bool, default False
Add all rows / columns (e.g. for subtotal / grand totals).
dropna : bool, default True
Do not include columns whose entries are all NaN.
margins_name : str, default 'All'
Name of the row / column that will contain the totals
when margins is True.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionchanged:: 0.25.0
Returns
-------
DataFrame
An Excel style pivot table.
See Also
--------
DataFrame.pivot : Pivot without aggregation that can handle
non-numeric data.
DataFrame.melt: Unpivot a DataFrame from wide to long format,
optionally leaving identifiers set.
wide_to_long : Wide panel to long format. Less flexible but more
user-friendly than melt.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]})
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
We can also fill missing values using the `fill_value` parameter.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum, fill_value=0)
>>> table
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two 0 6
The next example aggregates by taking the mean across multiple columns.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': np.mean})
>>> table
D E
A C
bar large 5.500000 7.500000
small 5.500000 8.500000
foo large 2.000000 4.500000
small 2.333333 4.333333
We can also calculate multiple types of aggregations for any given
value column.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max mean min
A C
bar large 5.500000 9.0 7.500000 6.0
small 5.500000 9.0 8.500000 8.0
foo large 2.000000 5.0 4.500000 4.0
small 2.333333 6.0 4.333333 2.0
"""
@Substitution("")
@Appender(_shared_docs["pivot_table"])
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
observed=False,
) -> DataFrame:
from pandas.core.reshape.pivot import pivot_table
return pivot_table(
self,
values=values,
index=index,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
)
def stack(self, level=-1, dropna=True):
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
result = stack_multiple(self, level, dropna=dropna)
else:
result = stack(self, level, dropna=dropna)
return result.__finalize__(self, method="stack")
def explode(
self, column: Union[str, Tuple], ignore_index: bool = False
) -> DataFrame:
"""
Transform each element of a list-like to a row, replicating index values.
.. versionadded:: 0.25.0
Parameters
----------
column : str or tuple
Column to explode.
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
Exploded lists to rows of the subset columns;
index will be duplicated for these rows.
Raises
------
ValueError
If columns of the frame are not unique.
See Also
--------
DataFrame.unstack : Pivot a level of the (necessarily hierarchical)
index labels.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
Series.explode : Transform each element of a list-like to a row.
Notes
-----
This routine will explode list-likes including lists, tuples, sets,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged, and empty list-likes will
result in a np.nan for that row. In addition, the ordering of rows in the
output will be non-deterministic when exploding sets.
Examples
--------
>>> df = pd.DataFrame({'A': [[1, 2, 3], 'foo', [], [3, 4]], 'B': 1})
>>> df
A B
0 [1, 2, 3] 1
1 foo 1
2 [] 1
3 [3, 4] 1
>>> df.explode('A')
A B
0 1 1
0 2 1
0 3 1
1 foo 1
2 NaN 1
3 3 1
3 4 1
"""
if not (is_scalar(column) or isinstance(column, tuple)):
raise ValueError("column must be a scalar")
if not self.columns.is_unique:
raise ValueError("columns must be unique")
df = self.reset_index(drop=True)
result = df[column].explode()
result = df.drop([column], axis=1).join(result)
if ignore_index:
result.index = ibase.default_index(len(result))
else:
result.index = self.index.take(result.index)
result = result.reindex(columns=self.columns, copy=False)
return result
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels.
Returns a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
Parameters
----------
level : int, str, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name.
fill_value : int, str or dict
Replace NaN with this value if the unstack produces missing values.
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
"""
from pandas.core.reshape.reshape import unstack
result = unstack(self, level, fill_value)
return result.__finalize__(self, method="unstack")
@Appender(_shared_docs["melt"] % {"caller": "df.melt(", "other": "melt"})
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
ignore_index=True,
) -> DataFrame:
return melt(
self,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
ignore_index=ignore_index,
)
# ----------------------------------------------------------------------
# Time series-related
@doc(
Series.diff,
klass="Dataframe",
extra_params="axis : {0 or 'index', 1 or 'columns'}, default 0\n "
"Take difference over rows (0) or columns (1).\n",
other_klass="Series",
examples=dedent(
"""
Difference with previous row
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(axis=1)
a b c
0 NaN 0 0
1 NaN -1 3
2 NaN -1 7
3 NaN -1 13
4 NaN 0 20
5 NaN 2 28
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
Overflow in input dtype
>>> df = pd.DataFrame({'a': [1, 0]}, dtype=np.uint8)
>>> df.diff()
a
0 NaN
1 255.0"""
),
)
def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame:
if not isinstance(periods, int):
if not (is_float(periods) and periods.is_integer()):
raise ValueError("periods must be an integer")
periods = int(periods)
bm_axis = self._get_block_manager_axis(axis)
if bm_axis == 0 and periods != 0:
return self - self.shift(periods, axis=axis)
new_data = self._mgr.diff(n=periods, axis=bm_axis)
return self._constructor(new_data).__finalize__(self, "diff")
# ----------------------------------------------------------------------
# Function application
def _gotitem(
self,
key: Union[Label, List[Label]],
ndim: int,
subset: Optional[FrameOrSeriesUnion] = None,
) -> FrameOrSeriesUnion:
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
elif subset.ndim == 1: # is Series
return subset
# TODO: _shallow_copy(subset)?
return subset[key]
_agg_summary_and_see_also_doc = dedent(
"""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to
``numpy.mean(arr_2d, axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
core.groupby.GroupBy : Perform operations over groups.
core.resample.Resampler : Perform operations over resampled bins.
core.window.Rolling : Perform operations over rolling window.
core.window.Expanding : Perform operations over expanding window.
core.window.ExponentialMovingWindow : Perform operation over exponential weighted
window.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
sum 12.0 NaN
min 1.0 2.0
max NaN 8.0
Aggregate different functions over the columns and rename the index of the resulting
DataFrame.
>>> df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))
A B C
x 7.0 NaN NaN
y NaN 2.0 NaN
z NaN NaN 6.0
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
"""
)
@doc(
_shared_docs["aggregate"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
see_also=_agg_summary_and_see_also_doc,
examples=_agg_examples_doc,
)
def aggregate(self, func=None, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
result = None
try:
result, how = self._aggregate(func, axis, *args, **kwargs)
except TypeError as err:
exc = TypeError(
"DataFrame constructor called with "
f"incompatible data and dtype: {err}"
)
raise exc from err
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
if relabeling:
# This keeps the order of occurrence of the original columns unchanged,
# and also keeps the order of occurrence of the new columns unchanged.
# For the return values of reconstruct_func, if relabeling is
# False, columns and order will be None.
assert columns is not None
assert order is not None
result_in_dict = relabel_result(result, func, columns, order)
result = DataFrame(result_in_dict, index=columns)
return result
def _aggregate(self, arg, axis=0, *args, **kwargs):
if axis == 1:
# NDFrame.aggregate returns a tuple, and we need to transpose
# only result
result, how = aggregate(self.T, arg, *args, **kwargs)
result = result.T if result is not None else result
return result, how
return aggregate(self, arg, *args, **kwargs)
agg = aggregate
@doc(
_shared_docs["transform"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
)
def transform(
self, func: AggFuncType, axis: Axis = 0, *args, **kwargs
) -> DataFrame:
result = transform(self, func, axis, *args, **kwargs)
assert isinstance(result, DataFrame)
return result
def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``). By default (``result_type=None``), the final return type
is inferred from the return type of the applied function. Otherwise,
it depends on the `result_type` argument.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
raw : bool, default False
Determines if row or column is passed as a Series or ndarray object:
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
result_type : {'expand', 'reduce', 'broadcast', None}, default None
These only act when ``axis=1`` (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : returns a Series if possible rather than expanding
list-like results. This is the opposite of 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the DataFrame, the original index and columns will be
retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwds
Additional keyword arguments to pass as keywords arguments to
`func`.
Returns
-------
Series or DataFrame
Result of applying ``func`` along the given axis of the
DataFrame.
See Also
--------
DataFrame.applymap: For elementwise operations.
DataFrame.aggregate: Only perform aggregating type operations.
DataFrame.transform: Only perform transforming type operations.
Examples
--------
>>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> df.apply(np.sqrt)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
Using a reducing function on either axis
>>> df.apply(np.sum, axis=0)
A 12
B 27
dtype: int64
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
Returning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
Passing ``result_type='expand'`` will expand list-like results
to columns of a DataFrame.
>>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
0 1
0 1 2
1 1 2
2 1 2
Returning a Series inside the function is similar to passing
``result_type='expand'``. The resulting column names
will be the Series index.
>>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
foo bar
0 1 2
1 1 2
2 1 2
Passing ``result_type='broadcast'`` will ensure the same shape
result, whether list-like or scalar is returned by the function,
and broadcast it along the axis. The resulting column names will
be the originals.
>>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
A B
0 1 2
1 1 2
2 1 2
"""
from pandas.core.apply import frame_apply
op = frame_apply(
self,
func=func,
axis=axis,
raw=raw,
result_type=result_type,
args=args,
kwds=kwds,
)
return op.get_result()
def applymap(self, func, na_action: Optional[str] = None) -> DataFrame:
"""
Apply a function to a DataFrame elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
na_action : {None, 'ignore'}, default None
If 'ignore', propagate NaN values without passing them to func.
.. versionadded:: 1.2
Returns
-------
DataFrame
Transformed DataFrame.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
Examples
--------
>>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> df.applymap(lambda x: len(str(x)))
0 1
0 3 4
1 5 5
Like Series.map, NA values can be ignored:
>>> df_copy = df.copy()
>>> df_copy.iloc[0, 0] = pd.NA
>>> df_copy.applymap(lambda x: len(str(x)), na_action='ignore')
0 1
0 <NA> 4
1 5 5
Note that a vectorized version of `func` often exists, which will
be much faster. You could square each number elementwise.
>>> df.applymap(lambda x: x**2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
But it's better to avoid applymap in that case.
>>> df ** 2
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
if na_action not in {"ignore", None}:
raise ValueError(
f"na_action must be 'ignore' or None. Got {repr(na_action)}"
)
ignore_na = na_action == "ignore"
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func, ignore_na=ignore_na)
return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na)
return self.apply(infer).__finalize__(self, "applymap")
# ----------------------------------------------------------------------
# Merging / joining methods
def append(
self, other, ignore_index=False, verify_integrity=False, sort=False
) -> DataFrame:
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
verify_integrity : bool, default False
If True, raise ValueError on creating index with duplicates.
sort : bool, default False
Sort columns if the columns of `self` and `other` are not aligned.
.. versionchanged:: 1.0.0
Changed to not sort by default.
Returns
-------
DataFrame
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following examples, while not recommended ways of building a DataFrame,
show two approaches to generating one from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
if not ignore_index:
raise TypeError("Can only append a dict if ignore_index=True")
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True "
"or if the Series has a name"
)
index = Index([other.name], name=self.index.name)
idx_diff = other.index.difference(self.columns)
try:
combined_columns = self.columns.append(idx_diff)
except TypeError:
combined_columns = self.columns.astype(object).append(idx_diff)
other = (
other.reindex(combined_columns, copy=False)
.to_frame()
.T.infer_objects()
.rename_axis(index.names, copy=False)
)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list):
if not other:
pass
elif not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.reindex(columns=self.columns)
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self, *other]
else:
to_concat = [self, other]
return (
concat(
to_concat,
ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort,
)
).__finalize__(self, method="append")
def join(
self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False
) -> DataFrame:
"""
Join columns of another DataFrame.
Join columns with `other` DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame.
on : str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation.
how : {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use `other`'s index.
* outer: form union of calling frame's index (or column if on is
specified) with `other`'s index, and sort it lexicographically.
* inner: form intersection of calling frame's index (or column if
on is specified) with `other`'s index, preserving the order
of the calling's one.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from right frame's overlapping columns.
sort : bool, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword).
Returns
-------
DataFrame
A dataframe containing columns from both the caller and `other`.
See Also
--------
DataFrame.merge : For column(s)-on-column(s) operations.
Notes
-----
Parameters `on`, `lsuffix`, and `rsuffix` are not supported when
passing a list of `DataFrame` objects.
Support for specifying index levels as the `on` parameter was added
in version 0.23.0.
Examples
--------
>>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> df
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
4 K4 A4
5 K5 A5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> df.join(other, lsuffix='_caller', rsuffix='_other')
key_caller A key_other B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 NaN NaN
4 K4 A4 NaN NaN
5 K5 A5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both `df` and `other`. The joined DataFrame will have
key as its index.
>>> df.set_index('key').join(other.set_index('key'))
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the `on`
parameter. DataFrame.join always uses `other`'s index but we can use
any column in `df`. This method preserves the original DataFrame's
index in the result.
>>> df.join(other.set_index('key'), on='key')
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K2 A2 B2
3 K3 A3 NaN
4 K4 A4 NaN
5 K5 A5 NaN
"""
return self._join_compat(
other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort
)
def _join_compat(
self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False
):
from pandas.core.reshape.concat import concat
from pandas.core.reshape.merge import merge
if isinstance(other, Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
if how == "cross":
return merge(
self,
other,
how=how,
on=on,
suffixes=(lsuffix, rsuffix),
sort=sort,
)
return merge(
self,
other,
left_on=on,
how=how,
left_index=on is None,
right_index=True,
suffixes=(lsuffix, rsuffix),
sort=sort,
)
else:
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
# join indexes only using concat
if can_concat:
if how == "left":
res = concat(
frames, axis=1, join="outer", verify_integrity=True, sort=sort
)
return res.reindex(self.index, copy=False)
else:
return concat(
frames, axis=1, join=how, verify_integrity=True, sort=sort
)
joined = frames[0]
for frame in frames[1:]:
joined = merge(
joined, frame, how=how, left_index=True, right_index=True
)
return joined
@Substitution("")
@Appender(_merge_doc, indents=2)
def merge(
self,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
) -> DataFrame:
from pandas.core.reshape.merge import merge
return merge(
self,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
def round(self, decimals=0, *args, **kwargs) -> DataFrame:
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
*args
Additional keywords have no effect but might be accepted for
compatibility with numpy.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
A DataFrame with the affected columns rounded to the specified
number of decimal places.
See Also
--------
numpy.around : Round a numpy array to the given number of decimals.
Series.round : Round a Series to the given number of decimals.
Examples
--------
>>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.21 0.32
1 0.01 0.67
2 0.66 0.03
3 0.21 0.18
By providing an integer each column is rounded to the same number
of decimal places
>>> df.round(1)
dogs cats
0 0.2 0.3
1 0.0 0.7
2 0.7 0.0
3 0.2 0.2
With a dict, the number of places for specific columns can be
specified with the column names as key and the number of decimal
places as value
>>> df.round({'dogs': 1, 'cats': 0})
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
Using a Series, the number of places for specific columns can be
specified with the column names as index and the number of
decimal places as value
>>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])
>>> df.round(decimals)
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.items():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = list(_dict_round(self, decimals))
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals) for _, v in self.items()]
else:
raise TypeError("decimals must be an integer, a dict-like or a Series")
if len(new_cols) > 0:
return self._constructor(
concat(new_cols, axis=1), index=self.index, columns=self.columns
)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method="pearson", min_periods=1) -> DataFrame:
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float. Note that the returned matrix from corr
will have 1 along the diagonals and will be symmetric
regardless of the callable's behavior.
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for Pearson
and Spearman correlation.
Returns
-------
DataFrame
Correlation matrix.
See Also
--------
DataFrame.corrwith : Compute pairwise correlation with another
DataFrame or Series.
Series.corr : Compute the correlation between two Series.
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)
if method == "pearson":
correl = libalgos.nancorr(mat, minp=min_periods)
elif method == "spearman":
correl = libalgos.nancorr_spearman(mat, minp=min_periods)
elif method == "kendall" or callable(method):
if min_periods is None:
min_periods = 1
mat = mat.T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.0
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
else:
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
f"'{method}' was supplied"
)
return self._constructor(correl, index=idx, columns=cols)
def cov(
self, min_periods: Optional[int] = None, ddof: Optional[int] = 1
) -> DataFrame:
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
ddof : int, default 1
Delta degrees of freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
Series.cov : Compute covariance with another Series.
core.window.ExponentialMovingWindow.cov: Exponential weighted sample covariance.
core.window.Expanding.cov : Expanding sample covariance.
core.window.Rolling.cov : Rolling sample covariance.
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-ddof.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
because the estimated covariance matrix is not guaranteed to be positive
semi-definite. This could lead to estimate correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
<https://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_matrices>`__
for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
base_cov = np.empty((mat.shape[1], mat.shape[1]))
base_cov.fill(np.nan)
else:
base_cov = np.cov(mat.T, ddof=ddof)
base_cov = base_cov.reshape((len(cols), len(cols)))
else:
base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods)
return self._constructor(base_cov, index=idx, columns=cols)
def corrwith(self, other, axis=0, drop=False, method="pearson") -> Series:
"""
Compute pairwise correlation.
Pairwise correlation is computed between rows or columns of
DataFrame with rows or columns of Series or DataFrame. DataFrames
are first aligned along both axes before computing the
correlations.
Parameters
----------
other : DataFrame, Series
Object with which to compute correlations.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for
row-wise.
drop : bool, default False
Drop missing indices from result.
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float.
.. versionadded:: 0.24.0
Returns
-------
Series
Pairwise correlations.
See Also
--------
DataFrame.corr : Compute pairwise correlation of columns.
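Examples
--------
A minimal illustration (the frames below are constructed purely for demonstration):
>>> df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [4, 3, 2, 1]})
>>> df2 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [1, 2, 3, 4]})
>>> df1.corrwith(df2)
a 1.0
b -1.0
dtype: float64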
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(lambda x: other.corr(x, method=method), axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join="inner", copy=False)
if axis == 1:
left = left.T
right = right.T
if method == "pearson":
# mask missing values
left = left + right * 0
right = right + left * 0
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
elif method in ["kendall", "spearman"] or callable(method):
def c(x):
return nanops.nancorr(x[0], x[1], method=method)
correl = self._constructor_sliced(
map(c, zip(left.values.T, right.values.T)), index=left.columns
)
else:
raise ValueError(
f"Invalid method {method} was passed, "
"valid methods are: 'pearson', 'kendall', "
"'spearman', or callable"
)
if not drop:
# Find non-matching labels along the given axis
# and append missing correlations (GH 22375)
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
idx_diff = result_index.difference(correl.index)
if len(idx_diff) > 0:
correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff))
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
on `pandas.options.mode.use_inf_as_na`) are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each row.
level : int or str, optional
If the axis is a `MultiIndex` (hierarchical), count along a
particular `level`, collapsing into a `DataFrame`.
A `str` specifies the level name.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series or DataFrame
For each column/row the number of non-NA/null entries.
If `level` is specified returns a `DataFrame`.
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.value_counts: Count unique combinations of columns.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis='columns')
0 3
1 2
2 3
3 3
4 3
dtype: int64
Counts for one level of a `MultiIndex`:
>>> df.set_index(["Person", "Single"]).count(level="Person")
Age
Person
John 2
Lewis 1
Myla 1
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis, numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = self._constructor_sliced(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type or frame._mgr.any_extension_types:
# the or any_extension_types is really only hit for single-
# column frames with an extension array
result = notna(frame).sum(axis=axis)
else:
# GH13407
series_counts = notna(frame).sum(axis=axis)
counts = series_counts.values
result = self._constructor_sliced(
counts, index=frame._get_agg_axis(axis)
)
return result.astype("int64")
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError(
f"Can only count levels on hierarchical {self._get_axis_name(axis)}."
)
# Mask NaNs: Mask rows or columns where the index level is NaN, and all
# values in the DataFrame that are NaN
if frame._is_mixed_type:
# Since we have mixed types, calling notna(frame.values) might
# upcast everything to object
values_mask = notna(frame).values
else:
# But use the speedup when we have homogeneous dtypes
values_mask = notna(frame.values)
index_mask = notna(count_axis.get_level_values(level=level))
if axis == 1:
mask = index_mask & values_mask
else:
mask = index_mask.reshape(-1, 1) & values_mask
if isinstance(level, str):
level = count_axis._get_level_number(level)
level_name = count_axis._names[level]
level_index = count_axis.levels[level]._shallow_copy(name=level_name)
level_codes = ensure_int64(count_axis.codes[level])
counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis)
if axis == 1:
result = self._constructor(counts, index=agg_axis, columns=level_index)
else:
result = self._constructor(counts, index=level_index, columns=agg_axis)
return result
def _reduce(
self,
op,
name: str,
*,
axis=0,
skipna=True,
numeric_only=None,
filter_type=None,
**kwds,
):
assert filter_type is None or filter_type == "bool", filter_type
out_dtype = "bool" if filter_type == "bool" else None
own_dtypes = [arr.dtype for arr in self._iter_column_arrays()]
dtype_is_dt = np.array(
[is_datetime64_any_dtype(dtype) for dtype in own_dtypes],
dtype=bool,
)
if numeric_only is None and name in ["mean", "median"] and dtype_is_dt.any():
warnings.warn(
"DataFrame.mean and DataFrame.median with numeric_only=None "
"will include datetime64 and datetime64tz columns in a "
"future version.",
FutureWarning,
stacklevel=5,
)
cols = self.columns[~dtype_is_dt]
self = self[cols]
# TODO: Make other agg func handle axis=None properly GH#21597
axis = self._get_axis_number(axis)
labels = self._get_agg_axis(axis)
assert axis in [0, 1]
def func(values):
if is_extension_array_dtype(values.dtype):
return extract_array(values)._reduce(name, skipna=skipna, **kwds)
else:
return op(values, axis=axis, skipna=skipna, **kwds)
def blk_func(values):
if isinstance(values, ExtensionArray):
return values._reduce(name, skipna=skipna, **kwds)
else:
return op(values, axis=1, skipna=skipna, **kwds)
def _get_data() -> DataFrame:
if filter_type is None:
data = self._get_numeric_data()
else:
# GH#25101, GH#24434
assert filter_type == "bool"
data = self._get_bool_data()
return data
if numeric_only is not None or axis == 0:
# For numeric_only non-None and axis non-None, we know
# which blocks to use and no try/except is needed.
# For numeric_only=None, only the case with axis==0 and no object
# dtypes is unambiguous and can be handled with BlockManager.reduce
# Case with EAs see GH#35881
df = self
if numeric_only is True:
df = _get_data()
if axis == 1:
df = df.T
axis = 0
ignore_failures = numeric_only is None
# After possibly _get_data and transposing, we are now in the
# simple case where we can use BlockManager.reduce
res, indexer = df._mgr.reduce(blk_func, ignore_failures=ignore_failures)
out = df._constructor(res).iloc[0]
if out_dtype is not None:
out = out.astype(out_dtype)
if axis == 0 and is_object_dtype(out.dtype):
# GH#35865 careful to cast explicitly to object
nvs = coerce_to_dtypes(out.values, df.dtypes.iloc[np.sort(indexer)])
out[:] = np.array(nvs, dtype=object)
if axis == 0 and len(self) == 0 and name in ["sum", "prod"]:
# Even if we are object dtype, follow numpy and return
# float64, see test_apply_funcs_over_empty
out = out.astype(np.float64)
return out
assert numeric_only is None
data = self
values = data.values
try:
result = func(values)
except TypeError:
# e.g. in nanops trying to convert strs to float
data = _get_data()
labels = data._get_agg_axis(axis)
values = data.values
with np.errstate(all="ignore"):
result = func(values)
if filter_type == "bool" and notna(result).all():
result = result.astype(np.bool_)
elif filter_type is None and is_object_dtype(result.dtype):
try:
result = result.astype(np.float64)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = coerce_to_dtypes(result, data.dtypes)
result = self._constructor_sliced(result, index=labels)
return result
def nunique(self, axis=0, dropna=True) -> Series:
"""
Count distinct observations over requested axis.
Return Series with number of distinct observations. Can ignore NaN
values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
column-wise.
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
Series
See Also
--------
Series.nunique: Method nunique for Series.
DataFrame.count: Count non-NA cells for each column or row.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
dtype: int64
>>> df.nunique(axis=1)
0 1
1 2
2 2
dtype: int64
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True) -> Series:
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of minima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmin : Return index of the minimum element.
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
Examples
--------
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
... 'co2_emissions': [37.2, 19.66, 1712]},
... index=['Pork', 'Wheat Products', 'Beef'])
>>> df
consumption co2_emissions
Pork 10.51 37.20
Wheat Products 103.11 19.66
Beef 55.48 1712.00
By default, it returns the index for the minimum value in each column.
>>> df.idxmin()
consumption Pork
co2_emissions Wheat Products
dtype: object
To return the index for the minimum value in each row, use ``axis="columns"``.
>>> df.idxmin(axis="columns")
Pork consumption
Wheat Products co2_emissions
Beef consumption
dtype: object
"""
axis = self._get_axis_number(axis)
res = self._reduce(
nanops.nanargmin, "argmin", axis=axis, skipna=skipna, numeric_only=False
)
indices = res._values
# indices will always be np.ndarray since axis is not None and
# values is a 2d array for DataFrame
# error: Item "int" of "Union[int, Any]" has no attribute "__iter__"
assert isinstance(indices, np.ndarray) # for mypy
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return self._constructor_sliced(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True) -> Series:
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of maxima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmax : Return index of the maximum element.
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
Examples
--------
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
... 'co2_emissions': [37.2, 19.66, 1712]},
... index=['Pork', 'Wheat Products', 'Beef'])
>>> df
consumption co2_emissions
Pork 10.51 37.20
Wheat Products 103.11 19.66
Beef 55.48 1712.00
By default, it returns the index for the maximum value in each column.
>>> df.idxmax()
consumption Wheat Products
co2_emissions Beef
dtype: object
To return the index for the maximum value in each row, use ``axis="columns"``.
>>> df.idxmax(axis="columns")
Pork co2_emissions
Wheat Products consumption
Beef co2_emissions
dtype: object
"""
axis = self._get_axis_number(axis)
res = self._reduce(
nanops.nanargmax, "argmax", axis=axis, skipna=skipna, numeric_only=False
)
indices = res._values
# indices will always be np.ndarray since axis is not None and
# values is a 2d array for DataFrame
# error: Item "int" of "Union[int, Any]" has no attribute "__iter__"
assert isinstance(indices, np.ndarray) # for mypy
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return self._constructor_sliced(result, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num: int) -> Index:
"""
Let's be explicit about this.
"""
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})")
def mode(self, axis=0, numeric_only=False, dropna=True) -> DataFrame:
"""
Get the mode(s) of each element along the selected axis.
The mode of a set of values is the value that appears most often.
It can be multiple values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to iterate over while searching for the mode:
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row.
numeric_only : bool, default False
If True, only apply to numeric columns.
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The modes of each column or row.
See Also
--------
Series.mode : Return the highest frequency value in a Series.
Series.value_counts : Return the counts of values in a Series.
Examples
--------
>>> df = pd.DataFrame([('bird', 2, 2),
... ('mammal', 4, np.nan),
... ('arthropod', 8, 0),
... ('bird', 2, np.nan)],
... index=('falcon', 'horse', 'spider', 'ostrich'),
... columns=('species', 'legs', 'wings'))
>>> df
species legs wings
falcon bird 2 2.0
horse mammal 4 NaN
spider arthropod 8 0.0
ostrich bird 2 NaN
By default, missing values are not considered, and the modes of ``wings``
are both 0 and 2. Because the resulting DataFrame has two rows,
the second row of ``species`` and ``legs`` contains ``NaN``.
>>> df.mode()
species legs wings
0 bird 2.0 0.0
1 NaN NaN 2.0
Setting ``dropna=False``, ``NaN`` values are considered and they can be
the mode (like for ``wings``).
>>> df.mode(dropna=False)
species legs wings
0 bird 2 NaN
Setting ``numeric_only=True``, only the mode of numeric columns is
computed, and columns of other types are ignored.
>>> df.mode(numeric_only=True)
legs wings
0 2.0 0.0
1 NaN 2.0
To compute the mode over columns and not rows, use the axis parameter:
>>> df.mode(axis='columns', numeric_only=True)
0 1
falcon 2.0 NaN
horse 4.0 NaN
spider 0.0 8.0
ostrich 2.0 NaN
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode(dropna=dropna)
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"):
"""
Return values at the given quantile over requested axis.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value between 0 <= q <= 1, the quantile(s) to compute.
axis : {0, 1, 'index', 'columns'}, default 0
Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
computed as well.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
Series or DataFrame
If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
See Also
--------
core.window.Rolling.quantile: Rolling quantile.
numpy.percentile: Numpy function to compute the percentile.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
... columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
Name: 0.1, dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
Specifying `numeric_only=False` will also compute the quantile of
datetime and timedelta data.
>>> df = pd.DataFrame({'A': [1, 2],
... 'B': [pd.Timestamp('2010'),
... pd.Timestamp('2011')],
... 'C': [pd.Timedelta('1 days'),
... pd.Timedelta('2 days')]})
>>> df.quantile(0.5, numeric_only=False)
A 1.5
B 2010-07-02 12:00:00
C 1 days 12:00:00
Name: 0.5, dtype: object
"""
validate_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
if len(data.columns) == 0:
# GH#23925 _get_numeric_data may have dropped all columns
cols = Index([], name=self.columns.name)
if is_list_like(q):
return self._constructor([], index=q, columns=cols)
return self._constructor_sliced([], index=cols, name=q, dtype=np.float64)
result = data._mgr.quantile(
qs=q, axis=1, interpolation=interpolation, transposed=is_transposed
)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
def to_timestamp(
self, freq=None, how: str = "start", axis: Axis = 0, copy: bool = True
) -> DataFrame:
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period.
Parameters
----------
freq : str, default frequency of PeriodIndex
Desired frequency.
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with DatetimeIndex
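Examples
--------
A minimal sketch (index values chosen purely for illustration); the
PeriodIndex is replaced by a DatetimeIndex of period start timestamps:
>>> idx = pd.period_range('2021-01', periods=2, freq='M')
>>> df = pd.DataFrame({'col': [1, 2]}, index=idx)
>>> df.to_timestamp()
col
2021-01-01 1
2021-02-01 2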
"""
new_obj = self.copy(deep=copy)
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
if not isinstance(old_ax, PeriodIndex):
raise TypeError(f"unsupported Type {type(old_ax).__name__}")
new_ax = old_ax.to_timestamp(freq=freq, how=how)
setattr(new_obj, axis_name, new_ax)
return new_obj
def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> DataFrame:
"""
Convert DataFrame from DatetimeIndex to PeriodIndex.
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed).
Parameters
----------
freq : str, optional
Frequency of the PeriodIndex; inferred from the index if not passed.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with PeriodIndex
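Examples
--------
A minimal sketch (index values chosen purely for illustration); the
DatetimeIndex is converted to a PeriodIndex with the given frequency:
>>> idx = pd.to_datetime(['2021-01-01', '2021-02-01'])
>>> df = pd.DataFrame({'col': [1, 2]}, index=idx)
>>> df.to_period(freq='M').index
PeriodIndex(['2021-01', '2021-02'], dtype='period[M]', freq='M')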
"""
new_obj = self.copy(deep=copy)
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
if not isinstance(old_ax, DatetimeIndex):
raise TypeError(f"unsupported Type {type(old_ax).__name__}")
new_ax = old_ax.to_period(freq=freq)
setattr(new_obj, axis_name, new_ax)
return new_obj
def isin(self, values) -> DataFrame:
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dict
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dict, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
See Also
--------
DataFrame.eq: Equality test for DataFrame.
Series.isin: Equivalent method on Series.
Series.str.contains: Test if pattern or regex is contained within a
string of a Series or Index.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list, check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings):
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
When ``values`` is a Series or DataFrame the index and column must
match. Note that 'falcon' does not match based on the number of legs
in df2.
>>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},
... index=['spider', 'falcon'])
>>> df.isin(other)
num_legs num_wings
falcon True True
dog False False
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
values = collections.defaultdict(list, values)
return concat(
(
self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)
),
axis=1,
)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self), axis="index")
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError(
"only list-like or dict-like objects are allowed "
"to be passed to DataFrame.isin(), "
f"you passed a '{type(values).__name__}'"
)
return self._constructor(
algorithms.isin(self.values.ravel(), values).reshape(self.shape),
self.index,
self.columns,
)
# ----------------------------------------------------------------------
# Add index and columns
_AXIS_ORDERS = ["index", "columns"]
_AXIS_TO_AXIS_NUMBER: Dict[Axis, int] = {
**NDFrame._AXIS_TO_AXIS_NUMBER,
1: 1,
"columns": 1,
}
_AXIS_REVERSED = True
_AXIS_LEN = len(_AXIS_ORDERS)
_info_axis_number = 1
_info_axis_name = "columns"
index: Index = properties.AxisProperty(
axis=1, doc="The index (row labels) of the DataFrame."
)
columns: Index = properties.AxisProperty(
axis=0, doc="The column labels of the DataFrame."
)
@property
def _AXIS_NUMBERS(self) -> Dict[str, int]:
""".. deprecated:: 1.1.0"""
super()._AXIS_NUMBERS
return {"index": 0, "columns": 1}
@property
def _AXIS_NAMES(self) -> Dict[int, str]:
""".. deprecated:: 1.1.0"""
super()._AXIS_NAMES
return {0: "index", 1: "columns"}
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
hist = pandas.plotting.hist_frame
boxplot = pandas.plotting.boxplot_frame
sparse = CachedAccessor("sparse", SparseFrameAccessor)
DataFrame._add_numeric_operations()
ops.add_flex_arithmetic_methods(DataFrame)
def _from_nested_dict(data) -> collections.defaultdict:
new_data: collections.defaultdict = collections.defaultdict(dict)
for index, s in data.items():
for col, v in s.items():
new_data[col][index] = v
return new_data
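# Illustrative only (hypothetical input): _from_nested_dict({'r1': {'c1': 1}, 'r2': {'c1': 2}})
# returns defaultdict(dict, {'c1': {'r1': 1, 'r2': 2}}), i.e. the nesting is flipped
# from {index: {column: value}} to {column: {index: value}}.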
|
# Natural Language Toolkit: Compatibility Functions
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Steven Bird <sb@csse.unimelb.edu.au>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Backwards compatibility with previous versions of Python.
This module provides backwards compatibility by defining
functions and classes that were not available in earlier versions of
Python. Intended usage:
>>> from nltk.compat import *
Currently, NLTK requires Python 2.4 or later.
"""
######################################################################
# New in Python 2.5
######################################################################
# ElementTree
try:
from xml.etree import ElementTree
except ImportError:
from nltk.etree import ElementTree
# collections.defaultdict
# originally contributed by Yoav Goldberg <yoav.goldberg@gmail.com>
# new version by Jason Kirtland from Python cookbook.
# <http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/523034>
try:
from collections import defaultdict
except ImportError:
class defaultdict(dict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not hasattr(default_factory, '__call__')):
raise TypeError('first argument must be callable')
dict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'defaultdict(%s, %s)' % (self.default_factory,
dict.__repr__(self))
# [XX] to make pickle happy in python 2.4:
import collections
collections.defaultdict = defaultdict
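# A quick sketch of the expected behaviour (identical for the builtin and the
# fallback class defined above):
# >>> d = defaultdict(int)
# >>> d['missing'] += 1
# >>> d['missing']
# 1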
__all__ = ['ElementTree', 'defaultdict']
|
from django.contrib import admin
# Register your models here.
from .models import commands
class CommandsAdmin(admin.ModelAdmin):
list_display = ('title', 'command', 'describe')
admin.site.register(commands, CommandsAdmin)
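# With this registration, the admin changelist for `commands` displays the
# 'title', 'command' and 'describe' columns (assuming those are fields on the model).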
|
# -*- coding: utf-8 -*- #
# Copyright 2014 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for reading and manipulating VPN Gateways."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
class VpnTunnels(base.Group):
"""Read and manipulate Compute Engine VPN tunnels."""
# Placeholder to indicate that a detailed_help field exists and should
# be set outside the class definition.
detailed_help = None
VpnTunnels.category = base.NETWORKING_CATEGORY
VpnTunnels.detailed_help = {
'DESCRIPTION': """
Read and manipulate Cloud VPN tunnels.
For more information about Cloud VPN tunnels, see the
        [Cloud VPN tunnels documentation](https://cloud.google.com/network-connectivity/docs/vpn/concepts/overview).
See also: [VPN tunnels API](https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels).
""",
}
|
# This is free and unencumbered software released into the public domain.
class Message:
"""A message."""
def __init__(self, id=None):
self.id = id
def __repr__(self):
"""Returns a human-readable string representation of this object."""
return "message{{id={}}}".format(self.id)
def __str__(self):
"""Returns a human-readable string representation of this object."""
return self.__repr__()
|
import logging
from cached_property import threaded_cached_property
from .credentials import BaseCredentials
from .protocol import RetryPolicy, FailFast
from .transport import AUTH_TYPE_MAP
from .util import split_url
from .version import Version
log = logging.getLogger(__name__)
class Configuration:
"""
Assembles a connection protocol when autodiscover is not used.
If the server is not configured with autodiscover, the following should be sufficient:
config = Configuration(server='example.com', credentials=Credentials('MYWINDOMAIN\\myusername', 'topsecret'))
account = Account(primary_smtp_address='john@example.com', config=config)
You can also set the EWS service endpoint directly:
config = Configuration(service_endpoint='https://mail.example.com/EWS/Exchange.asmx', credentials=...)
If you know which authentication type the server uses, you add that as a hint:
    config = Configuration(service_endpoint='https://example.com/EWS/Exchange.asmx', auth_type=NTLM, credentials=...)
If you want to use autodiscover, don't use a Configuration object. Instead, set up an account like this:
credentials = Credentials(username='MYWINDOMAIN\\myusername', password='topsecret')
account = Account(primary_smtp_address='john@example.com', credentials=credentials, autodiscover=True)
"""
def __init__(self, credentials=None, server=None, service_endpoint=None, auth_type=None, version=None,
retry_policy=None):
if not isinstance(credentials, (BaseCredentials, type(None))):
raise ValueError("'credentials' %r must be a Credentials instance" % credentials)
if server and service_endpoint:
raise AttributeError("Only one of 'server' or 'service_endpoint' must be provided")
if auth_type is not None and auth_type not in AUTH_TYPE_MAP:
raise ValueError("'auth_type' %r must be one of %s"
% (auth_type, ', '.join("'%s'" % k for k in sorted(AUTH_TYPE_MAP.keys()))))
if not retry_policy:
retry_policy = FailFast()
if not isinstance(version, (Version, type(None))):
raise ValueError("'version' %r must be a Version instance" % version)
if not isinstance(retry_policy, RetryPolicy):
raise ValueError("'retry_policy' %r must be a RetryPolicy instance" % retry_policy)
self._credentials = credentials
if server:
self.service_endpoint = 'https://%s/EWS/Exchange.asmx' % server
else:
self.service_endpoint = service_endpoint
self.auth_type = auth_type
self.version = version
self.retry_policy = retry_policy
@property
def credentials(self):
# Do not update credentials from this class. Instead, do it from Protocol
return self._credentials
@threaded_cached_property
def server(self):
if not self.service_endpoint:
return None
return split_url(self.service_endpoint)[1]
def __repr__(self):
return self.__class__.__name__ + '(%s)' % ', '.join('%s=%r' % (k, getattr(self, k)) for k in (
'credentials', 'service_endpoint', 'auth_type', 'version', 'retry_policy'
))
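# A minimal round-trip sketch (an added illustration, assuming exchangelib's
# Credentials class; the server name is a placeholder): passing `server` builds
# the default EWS endpoint, and the `server` property is derived back from it.
#
#     >>> from exchangelib import Credentials
#     >>> config = Configuration(server='mail.example.com',
#     ...                        credentials=Credentials('MYWINDOMAIN\\myusername', 'topsecret'))
#     >>> config.service_endpoint
#     'https://mail.example.com/EWS/Exchange.asmx'
#     >>> config.server
#     'mail.example.com'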
|
# Copyright 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Declare a couple of functions called from Boost.Build
#
# Each function will receive as many arguments as there are ":"-separated
# arguments in the bjam call. Each argument is a list of strings.
# As a special exception (aka bug), if no arguments are passed from bjam,
# the Python function will be passed a single empty list.
#
# All Python functions must return a list of strings, which may be empty.
def test1(l):
return ["foo", "bar"]
def test2(l, l2):
return [l[0], l2[0]]
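# An added illustration of the calling convention described above (hypothetical,
# not invoked by the accompanying bjam file): every argument arrives as a list of
# strings, and a call with no arguments still delivers a single empty list.
def test3(l):
    if not l:
        return ["no-arguments"]
    return [str(len(l))] + l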
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""`verdi data remote` command."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import io
import click
from aiida.cmdline.commands.cmd_data import verdi_data
from aiida.cmdline.params import arguments, types
from aiida.cmdline.utils import echo
from aiida.common.files import get_mode_string
@verdi_data.group('remote')
def remote():
"""Manipulate RemoteData objects (reference to remote folders).
    A RemoteData can be thought of as a "symbolic link" to a folder on one of the
Computers set up in AiiDA (e.g. where a CalcJob will run).
This folder is called "remote" in the sense that it is on a Computer and
not in the AiiDA repository. Note, however, that the "remote" computer
could also be "localhost"."""
@remote.command('ls')
@arguments.DATUM(type=types.DataParamType(sub_classes=('aiida.data:remote',)))
@click.option('-l', '--long', 'ls_long', is_flag=True, default=False, help='Display also file metadata.')
@click.option('-p', '--path', type=click.STRING, default='.', help='The folder to list.')
def remote_ls(ls_long, path, datum):
"""List content of a (sub)directory in a RemoteData object."""
import datetime
try:
content = datum.listdir_withattributes(path=path)
except (IOError, OSError) as err:
echo.echo_critical(
'Unable to access the remote folder or file, check if it exists.\n'
'Original error: {}'.format(str(err))
)
for metadata in content:
if ls_long:
mtime = datetime.datetime.fromtimestamp(metadata['attributes'].st_mtime)
pre_line = '{} {:10} {} '.format(
get_mode_string(metadata['attributes'].st_mode), metadata['attributes'].st_size,
mtime.strftime('%d %b %Y %H:%M')
)
click.echo(pre_line, nl=False)
if metadata['isdir']:
click.echo(click.style(metadata['name'], fg='blue'))
else:
click.echo(metadata['name'])
@remote.command('cat')
@arguments.DATUM(type=types.DataParamType(sub_classes=('aiida.data:remote',)))
@click.argument('path', type=click.STRING)
def remote_cat(datum, path):
"""Show content of a file in a RemoteData object."""
import os
import sys
import tempfile
try:
with tempfile.NamedTemporaryFile(delete=False) as tmpf:
tmpf.close()
datum.getfile(path, tmpf.name)
with io.open(tmpf.name, encoding='utf8') as fhandle:
sys.stdout.write(fhandle.read())
except IOError as err:
echo.echo_critical('{}: {}'.format(err.errno, str(err)))
try:
os.remove(tmpf.name)
except OSError:
        # If the file cannot be deleted, ignore the error (it may not have been created in the first place).
pass
@remote.command('show')
@arguments.DATUM(type=types.DataParamType(sub_classes=('aiida.data:remote',)))
def remote_show(datum):
"""Show information for a RemoteData object."""
click.echo('- Remote computer name:')
click.echo(' {}'.format(datum.get_computer_name()))
click.echo('- Remote folder full path:')
click.echo(' {}'.format(datum.get_remote_path()))
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time    : 2021/7/5 5:29 PM
# @Author  : Latent
# @Email   : latentsky@gmail.com
# @File    : sequence_nick.py
# @Software: PyCharm
# @class   : Clean up shop-related information
"""
Field descriptions:
    1. nick_id      ----> auto-incremented by the database
    2. nick_name
    3. nick
    4. brand
    5. company_name
    6. platform
"""
from tools_class import Tools_Class
class Sequence_Nick(object):
    # Brand extraction
@classmethod
def sequence_brand(cls, data):
        # 1. Shop name
        seller = Sequence_Nick.sequence_seller(data=data)
        # 2. Shop ID (MD5 of the shop name)
        sid = Tools_Class.tools_md5(nick=seller)
        # 3. Brand
        brand = data['public']['brand']
        # 4. Platform
        platform = data['platform']
        if platform == 'taobao':
            tmall = data['public']['tmall']
            if tmall:
                platform = 'tmall'
            else:
                platform = 'taobao'
        result = {
            'seller': seller,
            'nick_id': sid,
            'brand': brand,
            'platform': platform
        }
        return result
    # Product shop name
@classmethod
def sequence_seller(cls, data):
try:
seller = data['seller']
except KeyError as k:
seller = data['seller_nick']
if seller is None:
seller = data['public']['nick']
return seller
|
import setuptools
with open("README.md", mode="r", encoding="utf-8") as readme_file:
long_description = readme_file.read()
setuptools.setup(
name="DialogTag",
version="1.1.3",
author="Bhavitvya Malik",
author_email="bhavitvya.malik@gmail.com",
description="A python library to classify dialogue tag.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/bhavitvyamalik/DialogTag",
packages=setuptools.find_packages(),
install_requires=[
'transformers>=3.0.0',
'tqdm',
'tensorflow>=2.0.0'
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
keywords="Tensorflow BERT NLP deep learning Transformer Networks "
)
|
"""
Linear SVM
==========
This script fits a linear support vector machine classifier to random data. It
illustrates how a function defined purely by NumPy operations can be minimized
directly with a gradient-based solver.
"""
import numpy as np
from autodiff.optimize import fmin_l_bfgs_b
def test_svm():
rng = np.random.RandomState(1)
# -- create some fake data
x = rng.rand(10, 5)
y = 2 * (rng.rand(10) > 0.5) - 1
l2_regularization = 1e-4
# -- loss function
def loss_fn(weights, bias):
margin = y * (np.dot(x, weights) + bias)
loss = np.maximum(0, 1 - margin) ** 2
l2_cost = 0.5 * l2_regularization * np.dot(weights, weights)
loss = np.mean(loss) + l2_cost
print('ran loss_fn(), returning {}'.format(loss))
return loss
# -- call optimizer
w_0, b_0 = np.zeros(5), np.zeros(())
w, b = fmin_l_bfgs_b(loss_fn, init_args=(w_0, b_0))
final_loss = loss_fn(w, b)
assert np.allclose(final_loss, 0.7229)
print('optimization successful!')
if __name__ == '__main__':
test_svm()
|
# The rest of this module uses Encryptor, so import that name under either package layout.
try:
    from prawframe.obfuscation import Encryptor
except ImportError:
    from .obfuscation import Encryptor
def bytes_packet(_bytes, termination_string=']'):
"""
    Create a packet header containing the number of bytes in the data that follows.
:param _bytes:
:param termination_string:
:return:
"""
return '{}{}'.format(len(_bytes), termination_string)
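# Illustrative usage (an added sketch, not from the original module): the header
# is simply the payload length followed by the termination string, so a receiver
# can read up to the terminator to learn how many bytes follow.
#
#     >>> bytes_packet('hello')
#     '5]'
#     >>> bytes_packet(b'\x00\x01\x02', termination_string='|')
#     '3|'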
def scrambles_input_unscrambles_output(func):
scrambler = Encryptor().load_key_file()
def decorator(*args, **kwargs):
args = list(args)
args[0] = scrambler.encrypt(args[0])
result = func(*args, **kwargs)
        descrambled = scrambler.decrypt(result)
        return descrambled
return decorator
def unscrambles_output(func):
scrambler = Encryptor().load_key_file()
def decorator(*args, **kwargs):
args = list(args)
scrambled_result = func(*args, **kwargs)
result = scrambler.decrypt(scrambled_result)
return result
return decorator
def scrambles_input(func):
scrambler = Encryptor().load_key_file()
def decorator(*args, **kwargs):
args = list(args)
args[0] = scrambler.encrypt(args[0])
result = func(*args, **kwargs)
return result
return decorator
|
import torch.nn as nn
import torch
class TransHeadNet(nn.Module):
def __init__(self, in_channels, num_layers=3, num_filters=256, kernel_size=3, output_dim=3, freeze=False,
with_bias_end=True):
super(TransHeadNet, self).__init__()
self.freeze = freeze
        if kernel_size == 3:
            padding = 1
        elif kernel_size == 2:
            padding = 0
        else:
            raise ValueError('Only kernel_size 2 or 3 is supported, got {}'.format(kernel_size))
self.features = nn.ModuleList()
for i in range(num_layers):
_in_channels = in_channels if i == 0 else num_filters
self.features.append(nn.Conv2d(_in_channels, num_filters, kernel_size=kernel_size, stride=1, padding=padding, bias=False))
self.features.append(nn.BatchNorm2d(num_filters))
self.features.append(nn.ReLU(inplace=True))
self.linears = nn.ModuleList()
self.linears.append(nn.Linear(256 * 8 * 8, 4096))
self.linears.append(nn.ReLU(inplace=True))
self.linears.append(nn.Linear(4096, 4096))
self.linears.append(nn.ReLU(inplace=True))
self.linears.append(nn.Linear(4096, output_dim))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, mean=0, std=0.001)
if with_bias_end and (m.bias is not None):
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.normal_(m.weight, mean=0, std=0.001)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0, std=0.001)
def forward(self, x):
if self.freeze:
with torch.no_grad():
for i, l in enumerate(self.features):
x = l(x)
x = x.view(-1, 256*8*8)
for i, l in enumerate(self.linears):
x = l(x)
return x.detach()
else:
for i, l in enumerate(self.features):
x = l(x)
x = x.view(-1, 256*8*8)
for i, l in enumerate(self.linears):
x = l(x)
return x
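# A minimal usage sketch (added for illustration; it assumes an input feature map
# of spatial size 8x8 and num_filters=256 so that the hard-coded flatten to
# 256*8*8 in forward() matches; 512 input channels is an arbitrary choice):
#
#     head = TransHeadNet(in_channels=512, num_layers=3, num_filters=256, output_dim=3)
#     features = torch.randn(2, 512, 8, 8)   # (batch, channels, height, width)
#     translation = head(features)           # -> tensor of shape (2, 3)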
|
#!/usr/bin/env python
#
# File: cont_pipeline.py
#
# Created: Friday, July 15 2016 by rejuvyesh <mail@rejuvyesh.com>
#
import argparse
import os
import yaml
import shutil
import sys
import rltools
from pipelines import pipeline
# Fix python 2.x
try:
input = raw_input
except NameError:
pass
def phase_train(spec, spec_file):
rltools.util.header('=== Running {} ==='.format(spec_file))
# Make checkpoint dir. All outputs go here
storagedir = spec['options']['storagedir']
n_workers = spec['options']['n_workers']
checkptdir = os.path.join(spec['options']['storagedir'], spec['options']['checkpt_subdir'])
rltools.util.mkdir_p(checkptdir)
assert not os.listdir(checkptdir), 'Checkpoint directory {} is not empty!'.format(checkptdir)
cmd_templates, output_filenames, argdicts = [], [], []
for alg in spec['training']['algorithms']:
for bline in spec['training']['baselines']:
for n_ev in spec['n_evaders']:
for n_pu in spec['n_pursuers']:
for n_se in spec['n_sensors']:
for n_co in spec['n_coop']:
# Number of cooperating agents can't be greater than pursuers
if n_co > n_pu:
continue
for f_rew in spec['food_reward']:
for p_rew in spec['poison_reward']:
for e_rew in spec['encounter_reward']:
for disc in spec['discounts']:
for gae in spec['gae_lambdas']:
for run in range(spec['training']['runs']):
strid = 'alg={},bline={},n_ev={},n_pu={},n_se={},n_co={},f_rew={},p_rew={},e_rew={},disc={},gae={},run={}'.format(
alg['name'], bline, n_ev, n_pu, n_se, n_co,
f_rew, p_rew, e_rew, disc, gae, run)
cmd_templates.append(alg['cmd'].replace(
'\n', ' ').strip())
output_filenames.append(strid + '.txt')
argdicts.append({
'baseline_type': bline,
'n_evaders': n_ev,
'n_pursuers': n_pu,
'n_sensors': n_se,
'n_coop': n_co,
'discount': disc,
'food_reward': f_rew,
'poison_reward': p_rew,
'encounter_reward': e_rew,
'gae_lambda': gae,
'log': os.path.join(checkptdir,
strid + '.h5')
})
rltools.util.ok('{} jobs to run...'.format(len(cmd_templates)))
rltools.util.warn('Continue? y/n')
if input() == 'y':
pipeline.run_jobs(cmd_templates, output_filenames, argdicts, storagedir,
n_workers=n_workers)
else:
rltools.util.failure('Canceled.')
sys.exit(1)
# Copy the pipeline yaml file to the output dir too
shutil.copyfile(spec_file, os.path.join(checkptdir, 'pipeline.yaml'))
# Keep git commit
import subprocess
    git_hash = subprocess.check_output('git rev-parse HEAD', shell=True).decode('utf-8').strip()
with open(os.path.join(checkptdir, 'git_hash.txt'), 'w') as f:
f.write(git_hash + '\n')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('spec', type=str)
args = parser.parse_args()
with open(args.spec, 'r') as f:
        spec = yaml.safe_load(f)
phase_train(spec, args.spec)
if __name__ == '__main__':
main()
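# A sketch of the spec file layout this pipeline expects, inferred from the keys
# read in phase_train() above (all values are placeholders, not from the original):
#
#     options:
#       storagedir: /tmp/experiments
#       checkpt_subdir: checkpoints
#       n_workers: 4
#     training:
#       runs: 2
#       algorithms:
#         - name: trpo
#           cmd: python train.py --baseline_type {baseline_type} --log {log}
#       baselines: [linear]
#     n_evaders: [5]
#     n_pursuers: [3]
#     n_sensors: [30]
#     n_coop: [2]
#     food_reward: [10.0]
#     poison_reward: [-1.0]
#     encounter_reward: [0.01]
#     discounts: [0.99]
#     gae_lambdas: [0.95]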
|
import sys
sys.path.insert(0,'../../../deeplab-public-ver2/python')
import caffe
import leveldb
import numpy as np
from caffe.proto import caffe_pb2
import csv
import cv2
# Wei Yang 2015-08-19
# Source
# Read LevelDB/LMDB
# ==================
# http://research.beenfrog.com/code/2015/03/28/read-leveldb-lmdb-for-caffe-with-python.html
# Plot image
# ==================
# http://www.pyimagesearch.com/2014/11/03/display-matplotlib-rgb-image/
# Creating LMDB in python
# ==================
# http://deepdish.io/2015/04/28/creating-lmdb-in-python/
leveldb_dir = "../../../../datasets/planet_cloudless/leveldb/train_leveldb"
PC_DIR = "../../../../datasets/planet_cloudless/"
OUT_DIR = PC_DIR + "images/"
w_train = csv.writer(open(PC_DIR + "train.csv", 'w'), delimiter=" ")
db = leveldb.LevelDB(leveldb_dir)
datum = caffe_pb2.Datum()
img_no = 0
for key, value in db.RangeIter():
datum.ParseFromString(value)
label = datum.label
data = caffe.io.datum_to_array(datum)
r = data[0,:,:]
g = data[1,:,:]
b = data[2,:,:]
#rgb rbg gbr grb brg bgr
image = cv2.merge([r,b,g])
cv2.imwrite(OUT_DIR + str(img_no).zfill(10) + '.jpg', image)
w_train.writerow([OUT_DIR + str(img_no).zfill(10) + '.jpg', label])
img_no += 1
|
import sys
import os
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('./demo/'))
from determined_ai_sphinx_theme import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinxcontrib.httpdomain',
]
# Do not warn about external images (status badges in README.rst)
suppress_warnings = ['image.nonlocal_uri']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyTorch Sphinx Theme'
copyright = u'PyTorch'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
intersphinx_mapping = {'rtd': ('https://docs.readthedocs.io/en/latest/', None)}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'determined_ai_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo_only': True
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["../"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = "demo/static/pytorch-logo-dark.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DeterminedAISphinxthemedemodoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PyTorchthemedemo.tex', u'PyTorch theme demo Documentation',
u'PyTorch, PyTorch', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pytorchthemedemo', u'PyTorch theme demo Documentation',
[u'PyTorch'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PyTorchthemedemo', u'PyTorch theme demo Documentation',
u'PyTorch', 'PyTorchthemedemo',
'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|